author    Sergey Torokhov <torokhov-s-a@yandex.ru>  2020-05-12 01:56:45 +0300
committer Sergey Torokhov <torokhov-s-a@yandex.ru>  2020-05-12 01:56:45 +0300
commit    021c7068ab1f61ef171029cbb243d13f1b33237e (patch)
tree      10bb41fd62bbe6821f4013f0afcbf8b3a1fa0789 /sci-physics
parent    dev-python/quantities: fix test and doc (diff)
sci-physics/SU2: new package
The SU2 package contains several bundled libraries that are not currently unbundled in the ebuild.

CGNS: Unbundling was attempted, but the sources failed to compile against the system cgnslib-3.3.0. They compiled successfully against cgnslib-3.4.0, but the related tests then failed.

Metis, Parmetis: These packages cannot be installed simultaneously on Gentoo, yet both are required by the build system when building via meson with the mpi option enabled. They can be chosen optionally when the autotools build system is used, but the ebuild uses meson.

Some additional features are disabled due to their experimental status or because they require downloading additional third-party libraries. The ebuild does not currently offer building these features; if they are implemented, they will also be bundled and compiled statically.

Signed-off-by: Sergey Torokhov <torokhov-s-a@yandex.ru>
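For context, a minimal installation sketch (the USE flags below come from IUSE in the ebuild; the package.use path is the standard Portage location, and this particular flag selection is only an example, not a recommendation):

	# Enable MPI, CGNS I/O and the shipped tutorials, then install the package.
	echo "sci-physics/SU2 cgns mpi tutorials" >> /etc/portage/package.use/su2
	emerge --ask sci-physics/SU2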
Diffstat (limited to 'sci-physics')
-rw-r--r--  sci-physics/SU2/Manifest                                   |    3
-rw-r--r--  sci-physics/SU2/SU2-7.0.4.ebuild                           |  112
-rw-r--r--  sci-physics/SU2/files/SU2-7.0.4-fix-env.patch              |   24
-rw-r--r--  sci-physics/SU2/files/SU2-7.0.4-fix-python-optimize.patch  | 2302
-rw-r--r--  sci-physics/SU2/files/SU2-7.0.4-unbundle_boost.patch       |   31
-rw-r--r--  sci-physics/SU2/metadata.xml                               |   28
6 files changed, 2500 insertions, 0 deletions
diff --git a/sci-physics/SU2/Manifest b/sci-physics/SU2/Manifest
new file mode 100644
index 000000000..6ebe00f76
--- /dev/null
+++ b/sci-physics/SU2/Manifest
@@ -0,0 +1,3 @@
+DIST SU2-7.0.4-TestCases.tar.gz 437960103 BLAKE2B 2469edc23f62589fa18be5fff6e036965f6b5f6e2be207642d318aac4d2044c07f0891568f86c1a3ab065e79afce50cc73ad0857b82093d79ac28a4d0451a4ad SHA512 f21d963815e024582e99647a21ebae0b17fc69f75bc34bb72cc3a86cc9ff8502342b31755b5da73e7088b4d0ce430bdd6b4efefc03583cbfcf5156c1849328e1
+DIST SU2-7.0.4-Tutorials.tar.gz 64282233 BLAKE2B b0d13a0988d5617868fad6098fe8110e3600415f05784ff04416cb23162fadc8c1d06d50c5200b14f65afb3e97ee766b21dfdcd4ec8ded9026baf510ca829e48 SHA512 604a05e15a8eae1c7255016261a6576a97fc364f66004ecaccaae932e3a97624c2599d354dd874562824caa8f8ea3dac2f03e0105b1c27d66ec0bf59e3a27105
+DIST SU2-7.0.4.tar.gz 20516147 BLAKE2B 21f45e4918bbc6a72bf47ad61d3301abed50a7cf569e9e8d4040201ff653e583d50a547853365302671922f023d0cc6f3735c1afcd0f3b6bf3c3fc92dc807787 SHA512 8e69f0e1d335adef0bd98666c98e29bc15ee0d7a0fcbbbc91a1ba02275ca52fda7f8f47434547f7982ce0e73a6ff78bd2ed57ca328d1e87b8afdd3b0a698d262
diff --git a/sci-physics/SU2/SU2-7.0.4.ebuild b/sci-physics/SU2/SU2-7.0.4.ebuild
new file mode 100644
index 000000000..1884b11b4
--- /dev/null
+++ b/sci-physics/SU2/SU2-7.0.4.ebuild
@@ -0,0 +1,112 @@
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=7
+
+PYTHON_COMPAT=( python3_{6,7,8} )
+
+inherit meson python-single-r1
+
+DESCRIPTION="SU2: An Open-Source Suite for Multiphysics Simulation and Design"
+HOMEPAGE="https://su2code.github.io/"
+SRC_URI="
+ https://github.com/su2code/SU2/archive/v${PV}.tar.gz -> ${P}.tar.gz
+ test? ( https://github.com/su2code/TestCases/archive/v${PV}.tar.gz -> ${P}-TestCases.tar.gz )
+ tutorials? ( https://github.com/su2code/Tutorials/archive/v${PV}.tar.gz -> ${P}-Tutorials.tar.gz )
+"
+
+LICENSE="LGPL-2.1"
+SLOT="0"
+KEYWORDS="~amd64"
+
+IUSE="cgns -mkl +mpi openblas tecio test tutorials"
+RESTRICT="!test? ( test )"
+REQUIRED_USE="
+ ${PYTHON_REQUIRED_USE}
+ mkl? ( !openblas )
+"
+
+RDEPEND="
+ ${PYTHON_DEPS}
+ mpi? ( virtual/mpi[cxx] )
+ mkl? ( sci-libs/mkl )
+ openblas? ( sci-libs/openblas )
+"
+DEPEND="
+ ${RDEPEND}
+ tecio? ( dev-libs/boost:= )
+"
+BDEPEND="virtual/pkgconfig"
+
+PATCHES=(
+ "${FILESDIR}/${P}-fix-env.patch"
+ "${FILESDIR}/${P}-unbundle_boost.patch"
+ "${FILESDIR}/${P}-fix-python-optimize.patch"
+)
+
+DOCS=( "LICENSE.md" "README.md" "SU2_PY/documentation.txt" )
+
+src_unpack() {
+ unpack "${P}.tar.gz"
+ if use test ; then
+		einfo "Unpacking ${P}-TestCases.tar.gz to ${WORKDIR}/${P}/TestCases"
+ tar -C "${P}"/TestCases --strip-components=1 -xzf "${DISTDIR}/${P}-TestCases.tar.gz" || die
+ fi
+ if use tutorials ; then
+		einfo "Unpacking ${P}-Tutorials.tar.gz to ${WORKDIR}/${P}/Tutorials"
+		mkdir "${P}"/Tutorials || die
+ tar -C "${P}"/Tutorials --strip-components=1 -xzf "${DISTDIR}/${P}-Tutorials.tar.gz" || die
+ fi
+}
+
+src_configure() {
+ local emesonargs=(
+ -Denable-autodiff=false
+ -Denable-directdiff=false
+ -Denable-pastix=false
+ -Denable-pywrapper=false
+ -Dwith-omp=false
+ $(meson_feature mpi with-mpi)
+ $(meson_use cgns enable-cgns)
+ $(meson_use mkl enable-mkl)
+ $(meson_use openblas enable-openblas)
+ $(meson_use tecio enable-tecio)
+ $(meson_use test enable-tests)
+ )
+ meson_src_configure
+}
+
+src_test() {
+	ln -s ../../${P}-build/SU2_CFD/src/SU2_CFD SU2_PY/SU2_CFD || die
+	ln -s ../../${P}-build/SU2_DEF/src/SU2_DEF SU2_PY/SU2_DEF || die
+	ln -s ../../${P}-build/SU2_DOT/src/SU2_DOT SU2_PY/SU2_DOT || die
+	ln -s ../../${P}-build/SU2_GEO/src/SU2_GEO SU2_PY/SU2_GEO || die
+	ln -s ../../${P}-build/SU2_MSH/src/SU2_MSH SU2_PY/SU2_MSH || die
+	ln -s ../../${P}-build/SU2_SOL/src/SU2_SOL SU2_PY/SU2_SOL || die
+
+ export SU2_RUN="${S}/SU2_PY"
+ export SU2_HOME="${S}"
+ export PATH=$PATH:$SU2_RUN
+ export PYTHONPATH=$PYTHONPATH:$SU2_RUN
+
+ einfo "Running UnitTests ..."
+	../${P}-build/UnitTests/test_driver || die "unit tests failed"
+
+	pushd TestCases/ || die
+	use mpi && { python parallel_regression.py || die ; }
+	use mpi || { python serial_regression.py || die ; }
+	use tutorials && use mpi && { python tutorials.py || die ; }
+	popd || die
+}
+
+src_install() {
+ meson_src_install
+ mkdir -p "${ED}$(python_get_sitedir)"
+ mv "${ED}"/usr/bin/{FSI,SU2,*.py} -t "${ED}$(python_get_sitedir)"
+ python_optimize "${D}/$(python_get_sitedir)"
+
+ if use tutorials ; then
+ insinto "/usr/share/${P}"
+ doins -r Tutorials
+ fi
+}
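A note on src_test above: it reproduces the SU2_RUN/SU2_HOME environment that SU2's Python scripts expect at runtime. A post-install sketch of the same convention might look like this (the site-packages path is an assumption and depends on the active Python; src_install relocates the FSI/SU2 packages and the *.py scripts there, while the solver binaries stay in /usr/bin):

	# Hypothetical user environment after installation; adjust the Python version.
	export SU2_RUN="/usr/lib/python3.8/site-packages"
	export PATH="${PATH}:${SU2_RUN}"
	export PYTHONPATH="${PYTHONPATH}:${SU2_RUN}"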
diff --git a/sci-physics/SU2/files/SU2-7.0.4-fix-env.patch b/sci-physics/SU2/files/SU2-7.0.4-fix-env.patch
new file mode 100644
index 000000000..3f65764c6
--- /dev/null
+++ b/sci-physics/SU2/files/SU2-7.0.4-fix-env.patch
@@ -0,0 +1,24 @@
+diff -Naur old_env/SU2_CFD/include/output/tools/CWindowingTools.hpp new_env/SU2_CFD/include/output/tools/CWindowingTools.hpp
+--- old_env/SU2_CFD/include/output/tools/CWindowingTools.hpp 2020-03-31 12:26:03.000000000 +0300
++++ new_env/SU2_CFD/include/output/tools/CWindowingTools.hpp 2020-05-10 17:04:24.000000000 +0300
+@@ -28,7 +28,7 @@
+ #pragma once
+
+ #include <vector>
+-#include "../../../Common/include/option_structure.hpp"
++#include "../../../../Common/include/option_structure.hpp"
+
+ class CWindowingTools{
+ public:
+diff -Naur old_env/UnitTests/meson.build new_env/UnitTests/meson.build
+--- old_env/UnitTests/meson.build 2020-05-10 17:03:43.000000000 +0300
++++ new_env/UnitTests/meson.build 2020-05-10 17:04:35.000000000 +0300
+@@ -24,7 +24,7 @@
+ test_driver = executable(
+ 'test_driver',
+ unit_test_files,
+- install : true,
++ install : false,
+ dependencies : [su2_cfd_dep, common_dep, su2_deps, catch2_dep],
+ cpp_args: ['-fPIC', default_warning_flags, su2_cpp_args]
+ )
diff --git a/sci-physics/SU2/files/SU2-7.0.4-fix-python-optimize.patch b/sci-physics/SU2/files/SU2-7.0.4-fix-python-optimize.patch
new file mode 100644
index 000000000..6ad7387e5
--- /dev/null
+++ b/sci-physics/SU2/files/SU2-7.0.4-fix-python-optimize.patch
@@ -0,0 +1,2302 @@
+diff -Naur old/SU2_PY/FSI/FSIInterface.py new/SU2_PY/FSI/FSIInterface.py
+--- old/SU2_PY/FSI/FSIInterface.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/FSI/FSIInterface.py 2020-05-10 16:17:07.000000000 +0300
+@@ -6,8 +6,8 @@
+ # \version 7.0.4 "Blackbird"
+ #
+ # SU2 Project Website: https://su2code.github.io
+-#
+-# The SU2 Project is maintained by the SU2 Foundation
++#
++# The SU2 Project is maintained by the SU2 Foundation
+ # (http://su2foundation.org)
+ #
+ # Copyright 2012-2020, SU2 Contributors (cf. AUTHORS.md)
+@@ -16,7 +16,7 @@
+ # modify it under the terms of the GNU Lesser General Public
+ # License as published by the Free Software Foundation; either
+ # version 2.1 of the License, or (at your option) any later version.
+-#
++#
+ # SU2 is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+@@ -42,19 +42,19 @@
+ # ----------------------------------------------------------------------
+
+ class Interface:
+- """
++ """
+ FSI interface class that handles fluid/solid solvers synchronisation and communication
+ """
+-
++
+ def __init__(self, FSI_config, FluidSolver, SolidSolver, have_MPI):
+- """
+- Class constructor. Declare some variables and do some screen outputs.
+- """
+-
++ """
++ Class constructor. Declare some variables and do some screen outputs.
++ """
++
+ if have_MPI == True:
+ from mpi4py import MPI
+ self.MPI = MPI
+- self.comm = MPI.COMM_WORLD #MPI World communicator
++ self.comm = MPI.COMM_WORLD #MPI World communicator
+ self.have_MPI = True
+ myid = self.comm.Get_rank()
+ else:
+@@ -62,42 +62,42 @@
+ self.have_MPI = False
+ myid = 0
+
+- self.rootProcess = 0 #the root process is chosen to be MPI rank = 0
++ self.rootProcess = 0 #the root process is chosen to be MPI rank = 0
+
+- self.nDim = FSI_config['NDIM'] #problem dimension
++ self.nDim = FSI_config['NDIM'] #problem dimension
+
+- self.haveFluidSolver = False #True if the fluid solver is initialized on the current rank
+- self.haveSolidSolver = False #True if the solid solver is initialized on the current rank
+- self.haveFluidInterface = False #True if the current rank owns at least one fluid interface node
+- self.haveSolidInterface = False #True if the current rank owns at least one solid interface node
++ self.haveFluidSolver = False #True if the fluid solver is initialized on the current rank
++ self.haveSolidSolver = False #True if the solid solver is initialized on the current rank
++ self.haveFluidInterface = False #True if the current rank owns at least one fluid interface node
++ self.haveSolidInterface = False #True if the current rank owns at least one solid interface node
+
+- self.fluidSolverProcessors = list() #list of partitions where the fluid solver is initialized
+- self.solidSolverProcessors = list() #list of partitions where the solid solver is initialized
++ self.fluidSolverProcessors = list() #list of partitions where the fluid solver is initialized
++ self.solidSolverProcessors = list() #list of partitions where the solid solver is initialized
+ self.fluidInterfaceProcessors = list() #list of partitions where there are fluid interface nodes
+- self.solidInterfaceProcessors = list() #list of partitions where there are solid interface nodes
++ self.solidInterfaceProcessors = list() #list of partitions where there are solid interface nodes
+
+- self.fluidInterfaceIdentifier = None #object that can identify the f/s interface within the fluid solver
+- self.solidInterfaceIdentifier = None #object that can identify the f/s interface within the solid solver
++ self.fluidInterfaceIdentifier = None #object that can identify the f/s interface within the fluid solver
++ self.solidInterfaceIdentifier = None #object that can identify the f/s interface within the solid solver
+
+- self.fluidGlobalIndexRange = {} #contains the global FSI indexing of each fluid interface node for all partitions
+- self.solidGlobalIndexRange = {} #contains the global FSI indexing of each solid interface node for all partitions
++ self.fluidGlobalIndexRange = {} #contains the global FSI indexing of each fluid interface node for all partitions
++ self.solidGlobalIndexRange = {} #contains the global FSI indexing of each solid interface node for all partitions
+
+- self.FluidHaloNodeList = {} #contains the the indices (fluid solver indexing) of the halo nodes for each partition
+- self.fluidIndexing = {} #links between the fluid solver indexing and the FSI indexing for the interface nodes
+- self.SolidHaloNodeList = {} #contains the the indices (solid solver indexing) of the halo nodes for each partition
+- self.solidIndexing = {} #links between the solid solver indexing and the FSI indexing for the interface nodes
+-
+- self.nLocalFluidInterfaceNodes = 0 #number of nodes (halo nodes included) on the fluid interface, on each partition
+- self.nLocalFluidInterfaceHaloNode = 0 #number of halo nodes on the fluid intrface, on each partition
+- self.nLocalFluidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the fluid interface, on each partition
+- self.nFluidInterfaceNodes = 0 #number of nodes on the fluid interface, sum over all the partitions
+- self.nFluidInterfacePhysicalNodes = 0 #number of physical nodes on the fluid interface, sum over all partitions
+-
+- self.nLocalSolidInterfaceNodes = 0 #number of physical nodes on the solid interface, on each partition
+- self.nLocalSolidInterfaceHaloNode = 0 #number of halo nodes on the solid intrface, on each partition
+- self.nLocalSolidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the solid interface, on each partition
+- self.nSolidInterfaceNodes = 0 #number of nodes on the solid interface, sum over all partitions
+- self.nSolidInterfacePhysicalNodes = 0 #number of physical nodes on the solid interface, sum over all partitions
++ self.FluidHaloNodeList = {} #contains the the indices (fluid solver indexing) of the halo nodes for each partition
++ self.fluidIndexing = {} #links between the fluid solver indexing and the FSI indexing for the interface nodes
++ self.SolidHaloNodeList = {} #contains the the indices (solid solver indexing) of the halo nodes for each partition
++ self.solidIndexing = {} #links between the solid solver indexing and the FSI indexing for the interface nodes
++
++ self.nLocalFluidInterfaceNodes = 0 #number of nodes (halo nodes included) on the fluid interface, on each partition
++ self.nLocalFluidInterfaceHaloNode = 0 #number of halo nodes on the fluid intrface, on each partition
++ self.nLocalFluidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the fluid interface, on each partition
++ self.nFluidInterfaceNodes = 0 #number of nodes on the fluid interface, sum over all the partitions
++ self.nFluidInterfacePhysicalNodes = 0 #number of physical nodes on the fluid interface, sum over all partitions
++
++ self.nLocalSolidInterfaceNodes = 0 #number of physical nodes on the solid interface, on each partition
++ self.nLocalSolidInterfaceHaloNode = 0 #number of halo nodes on the solid intrface, on each partition
++ self.nLocalSolidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the solid interface, on each partition
++ self.nSolidInterfaceNodes = 0 #number of nodes on the solid interface, sum over all partitions
++ self.nSolidInterfacePhysicalNodes = 0 #number of physical nodes on the solid interface, sum over all partitions
+
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ self.MappingMatrixA = None
+@@ -106,83 +106,83 @@
+ self.MappingMatrixB_T = None
+ self.d_RBF = self.nDim+1
+ else:
+- self.MappingMatrix = None #interpolation/mapping matrix for meshes interpolation/mapping
+- self.MappingMatrix_T = None #transposed interpolation/mapping matrix for meshes interpolation/mapping
++ self.MappingMatrix = None #interpolation/mapping matrix for meshes interpolation/mapping
++ self.MappingMatrix_T = None #transposed interpolation/mapping matrix for meshes interpolation/mapping
+ self.d_RBF = 0
+
+- self.localFluidInterface_array_X_init = None #initial fluid interface position on each partition (used for the meshes mapping)
++ self.localFluidInterface_array_X_init = None #initial fluid interface position on each partition (used for the meshes mapping)
+ self.localFluidInterface_array_Y_init = None
+ self.localFluidInterface_array_Z_init = None
+
+- self.haloNodesPositionsInit = {} #initial position of the halo nodes (fluid side only)
++ self.haloNodesPositionsInit = {} #initial position of the halo nodes (fluid side only)
+
+- self.solidInterface_array_DispX = None #solid interface displacement
++ self.solidInterface_array_DispX = None #solid interface displacement
+ self.solidInterface_array_DispY = None
+ self.solidInterface_array_DispZ = None
+
+- self.solidInterfaceResidual_array_X = None #solid interface position residual
++ self.solidInterfaceResidual_array_X = None #solid interface position residual
+ self.solidInterfaceResidual_array_Y = None
+ self.solidInterfaceResidual_array_Z = None
+
+- self.solidInterfaceResidualnM1_array_X = None #solid interface position residual at the previous BGS iteration
++ self.solidInterfaceResidualnM1_array_X = None #solid interface position residual at the previous BGS iteration
+ self.solidInterfaceResidualnM1_array_Y = None
+ self.solidInterfaceResidualnM1_array_Z = None
+-
+- self.fluidInterface_array_DispX = None #fluid interface displacement
++
++ self.fluidInterface_array_DispX = None #fluid interface displacement
+ self.fluidInterface_array_DispY = None
+ self.fluidInterface_array_DispZ = None
+
+- self.fluidLoads_array_X = None #loads on the fluid side of the f/s interface
++ self.fluidLoads_array_X = None #loads on the fluid side of the f/s interface
+ self.fluidLoads_array_Y = None
+ self.fluidLoads_array_Z = None
+
+- self.solidLoads_array_X = None #loads on the solid side of the f/s interface
++ self.solidLoads_array_X = None #loads on the solid side of the f/s interface
+ self.solidLoads_array_Y = None
+ self.solidLoads_array_Z = None
+
+- self.aitkenParam = FSI_config['AITKEN_PARAM'] #relaxation parameter for the BGS method
+- self.FSIIter = 0 #current FSI iteration
+- self.unsteady = False #flag for steady or unsteady simulation (default is steady)
+-
+- # ---Some screen output ---
+- self.MPIPrint('Fluid solver : SU2_CFD')
+- self.MPIPrint('Solid solver : {}'.format(FSI_config['CSD_SOLVER']))
++ self.aitkenParam = FSI_config['AITKEN_PARAM'] #relaxation parameter for the BGS method
++ self.FSIIter = 0 #current FSI iteration
++ self.unsteady = False #flag for steady or unsteady simulation (default is steady)
++
++ # ---Some screen output ---
++ self.MPIPrint('Fluid solver : SU2_CFD')
++ self.MPIPrint('Solid solver : {}'.format(FSI_config['CSD_SOLVER']))
+
+- if FSI_config['TIME_MARCHING'] == 'YES':
++ if FSI_config['TIME_MARCHING'] == 'YES':
+ self.MPIPrint('Unsteady coupled simulation with physical time step : {} s'.format(FSI_config['UNST_TIMESTEP']))
+ self.unsteady = True
+- else:
+- self.MPIPrint('Steady coupled simulation')
++ else:
++ self.MPIPrint('Steady coupled simulation')
+
+- if FSI_config['MATCHING_MESH'] == 'YES':
+- self.MPIPrint('Matching fluid-solid interface')
+- else:
++ if FSI_config['MATCHING_MESH'] == 'YES':
++ self.MPIPrint('Matching fluid-solid interface')
++ else:
+ if FSI_config['MESH_INTERP_METHOD'] == 'TPS':
+- self.MPIPrint('Non matching fluid-solid interface with Thin Plate Spline interpolation')
++ self.MPIPrint('Non matching fluid-solid interface with Thin Plate Spline interpolation')
+ elif FSI_config['MESH_INTERP_METHOD'] == 'RBF':
+ self.MPIPrint('Non matching fluid-solid interface with Radial Basis Function interpolation')
+ self.RBF_rad = FSI_config['RBF_RADIUS']
+- self.MPIPrint('Radius value : {}'.format(self.RBF_rad))
++ self.MPIPrint('Radius value : {}'.format(self.RBF_rad))
+ else:
+- self.MPIPrint('Non matching fluid-solid interface with Nearest Neighboor interpolation')
++ self.MPIPrint('Non matching fluid-solid interface with Nearest Neighboor interpolation')
+
+- self.MPIPrint('Solid predictor : {}'.format(FSI_config['DISP_PRED']))
++ self.MPIPrint('Solid predictor : {}'.format(FSI_config['DISP_PRED']))
+
+- self.MPIPrint('Maximum number of FSI iterations : {}'.format(FSI_config['NB_FSI_ITER']))
++ self.MPIPrint('Maximum number of FSI iterations : {}'.format(FSI_config['NB_FSI_ITER']))
+
+- self.MPIPrint('FSI tolerance : {}'.format(FSI_config['FSI_TOLERANCE']))
++ self.MPIPrint('FSI tolerance : {}'.format(FSI_config['FSI_TOLERANCE']))
+
+- if FSI_config['AITKEN_RELAX'] == 'STATIC':
+- self.MPIPrint('Static Aitken under-relaxation with constant parameter {}'.format(FSI_config['AITKEN_PARAM']))
+- elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
+- self.MPIPrint('Dynamic Aitken under-relaxation with initial parameter {}'.format(FSI_config['AITKEN_PARAM']))
+- else:
+- self.MPIPrint('No Aitken under-relaxation')
++ if FSI_config['AITKEN_RELAX'] == 'STATIC':
++ self.MPIPrint('Static Aitken under-relaxation with constant parameter {}'.format(FSI_config['AITKEN_PARAM']))
++ elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
++ self.MPIPrint('Dynamic Aitken under-relaxation with initial parameter {}'.format(FSI_config['AITKEN_PARAM']))
++ else:
++ self.MPIPrint('No Aitken under-relaxation')
+
+ self.MPIPrint('FSI interface is set')
+
+ def MPIPrint(self, message):
+- """
++ """
+ Print a message on screen only from the master process.
+ """
+
+@@ -198,28 +198,28 @@
+ """
+ Perform a synchronization barrier in case of parallel run with MPI.
+ """
+-
++
+ if self.have_MPI == True:
+ self.comm.barrier()
+
+ def connect(self, FSI_config, FluidSolver, SolidSolver):
+- """
+- Connection between solvers.
+- Creates the communication support between the two solvers.
+- Gets information about f/s interfaces from the two solvers.
+- """
++ """
++ Connection between solvers.
++ Creates the communication support between the two solvers.
++ Gets information about f/s interfaces from the two solvers.
++ """
+ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+- MPIsize = self.comm.Get_size()
++ MPIsize = self.comm.Get_size()
+ else:
+ myid = 0
+ MPIsize = 1
+-
+- # --- Identify the fluid and solid interfaces and store the number of nodes on both sides (and for each partition) ---
++
++ # --- Identify the fluid and solid interfaces and store the number of nodes on both sides (and for each partition) ---
+ self.fluidInterfaceIdentifier = None
+ self.nLocalFluidInterfaceNodes = 0
+ if FluidSolver != None:
+- print('Fluid solver is initialized on process {}'.format(myid))
++ print('Fluid solver is initialized on process {}'.format(myid))
+ self.haveFluidSolver = True
+ allMovingMarkersTags = FluidSolver.GetAllMovingMarkersTag()
+ allMarkersID = FluidSolver.GetAllBoundaryMarkers()
+@@ -229,23 +229,23 @@
+ if allMovingMarkersTags[0] in allMarkersID.keys():
+ self.fluidInterfaceIdentifier = allMarkersID[allMovingMarkersTags[0]]
+ if self.fluidInterfaceIdentifier != None:
+- self.nLocalFluidInterfaceNodes = FluidSolver.GetNumberVertices(self.fluidInterfaceIdentifier)
+- if self.nLocalFluidInterfaceNodes != 0:
++ self.nLocalFluidInterfaceNodes = FluidSolver.GetNumberVertices(self.fluidInterfaceIdentifier)
++ if self.nLocalFluidInterfaceNodes != 0:
+ self.haveFluidInterface = True
+- print('Number of interface fluid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalFluidInterfaceNodes))
+- else:
+- pass
++ print('Number of interface fluid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalFluidInterfaceNodes))
++ else:
++ pass
+
+- if SolidSolver != None:
+- print('Solid solver is initialized on process {}'.format(myid))
++ if SolidSolver != None:
++ print('Solid solver is initialized on process {}'.format(myid))
+ self.haveSolidSolver = True
+- self.solidInterfaceIdentifier = SolidSolver.getFSIMarkerID()
+- self.nLocalSolidInterfaceNodes = SolidSolver.getNumberOfSolidInterfaceNodes(self.solidInterfaceIdentifier)
+- if self.nLocalSolidInterfaceNodes != 0:
++ self.solidInterfaceIdentifier = SolidSolver.getFSIMarkerID()
++ self.nLocalSolidInterfaceNodes = SolidSolver.getNumberOfSolidInterfaceNodes(self.solidInterfaceIdentifier)
++ if self.nLocalSolidInterfaceNodes != 0:
+ self.haveSolidInterface = True
+ print('Number of interface solid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalSolidInterfaceNodes))
+- else:
+- pass
++ else:
++ pass
+
+ # --- Exchange information about processors on which the solvers are defined and where the interface nodes are lying ---
+ if self.have_MPI == True:
+@@ -266,18 +266,18 @@
+ else:
+ sendBufSolidInterface = np.array(int(0))
+ rcvBufFluid = np.zeros(MPIsize, dtype = int)
+- rcvBufSolid = np.zeros(MPIsize, dtype = int)
++ rcvBufSolid = np.zeros(MPIsize, dtype = int)
+ rcvBufFluidInterface = np.zeros(MPIsize, dtype = int)
+- rcvBufSolidInterface = np.zeros(MPIsize, dtype = int)
++ rcvBufSolidInterface = np.zeros(MPIsize, dtype = int)
+ self.comm.Allgather(sendBufFluid, rcvBufFluid)
+ self.comm.Allgather(sendBufSolid, rcvBufSolid)
+ self.comm.Allgather(sendBufFluidInterface, rcvBufFluidInterface)
+ self.comm.Allgather(sendBufSolidInterface, rcvBufSolidInterface)
+ for iProc in range(MPIsize):
+- if rcvBufFluid[iProc] == 1:
++ if rcvBufFluid[iProc] == 1:
+ self.fluidSolverProcessors.append(iProc)
+ if rcvBufSolid[iProc] == 1:
+- self.solidSolverProcessors.append(iProc)
++ self.solidSolverProcessors.append(iProc)
+ if rcvBufFluidInterface[iProc] == 1:
+ self.fluidInterfaceProcessors.append(iProc)
+ if rcvBufSolidInterface[iProc] == 1:
+@@ -285,19 +285,19 @@
+ del sendBufFluid, sendBufSolid, rcvBufFluid, rcvBufSolid, sendBufFluidInterface, sendBufSolidInterface, rcvBufFluidInterface, rcvBufSolidInterface
+ else:
+ self.fluidSolverProcessors.append(0)
+- self.solidSolverProcessors.append(0)
++ self.solidSolverProcessors.append(0)
+ self.fluidInterfaceProcessors.append(0)
+ self.solidInterfaceProcessors.append(0)
+
+- self.MPIBarrier()
+-
+- # --- Calculate the total number of nodes at the fluid interface (sum over all the partitions) ---
++ self.MPIBarrier()
++
++ # --- Calculate the total number of nodes at the fluid interface (sum over all the partitions) ---
+ # Calculate the number of halo nodes on each partition
+ self.nLocalFluidInterfaceHaloNode = 0
+- for iVertex in range(self.nLocalFluidInterfaceNodes):
++ for iVertex in range(self.nLocalFluidInterfaceNodes):
+ if FluidSolver.IsAHaloNode(self.fluidInterfaceIdentifier, iVertex) == True:
+ GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
+- self.FluidHaloNodeList[GlobalIndex] = iVertex
++ self.FluidHaloNodeList[GlobalIndex] = iVertex
+ self.nLocalFluidInterfaceHaloNode += 1
+ # Calculate the number of physical (= not halo) nodes on each partition
+ self.nLocalFluidInterfacePhysicalNodes = self.nLocalFluidInterfaceNodes - self.nLocalFluidInterfaceHaloNode
+@@ -308,10 +308,10 @@
+
+ # Same thing for the solid part
+ self.nLocalSolidInterfaceHaloNode = 0
+- #for iVertex in range(self.nLocalSolidInterfaceNodes):
++ #for iVertex in range(self.nLocalSolidInterfaceNodes):
+ #if SoliddSolver.IsAHaloNode(self.fluidInterfaceIdentifier, iVertex) == True:
+ #GlobalIndex = SolidSolver.GetVertexGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+- #self.SolidHaloNodeList[GlobalIndex] = iVertex
++ #self.SolidHaloNodeList[GlobalIndex] = iVertex
+ #self.nLocalSolidInterfaceHaloNode += 1
+ self.nLocalSolidInterfacePhysicalNodes = self.nLocalSolidInterfaceNodes - self.nLocalSolidInterfaceHaloNode
+ if self.have_MPI == True:
+@@ -323,11 +323,11 @@
+ # --- Calculate the total number of nodes (with and without halo) at the fluid interface (sum over all the partitions) and broadcast the number accross all processors ---
+ sendBuffHalo = np.array(int(self.nLocalFluidInterfaceNodes))
+ sendBuffPhysical = np.array(int(self.nLocalFluidInterfacePhysicalNodes))
+- rcvBuffHalo = np.zeros(1, dtype=int)
++ rcvBuffHalo = np.zeros(1, dtype=int)
+ rcvBuffPhysical = np.zeros(1, dtype=int)
+- if self.have_MPI == True:
++ if self.have_MPI == True:
+ self.comm.barrier()
+- self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
++ self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
+ self.comm.Allreduce(sendBuffPhysical,rcvBuffPhysical,op=self.MPI.SUM)
+ self.nFluidInterfaceNodes = rcvBuffHalo[0]
+ self.nFluidInterfacePhysicalNodes = rcvBuffPhysical[0]
+@@ -339,11 +339,11 @@
+ # Same thing for the solid part
+ sendBuffHalo = np.array(int(self.nLocalSolidInterfaceNodes))
+ sendBuffPhysical = np.array(int(self.nLocalSolidInterfacePhysicalNodes))
+- rcvBuffHalo = np.zeros(1, dtype=int)
++ rcvBuffHalo = np.zeros(1, dtype=int)
+ rcvBuffPhysical = np.zeros(1, dtype=int)
+ if self.have_MPI == True:
+- self.comm.barrier()
+- self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
++ self.comm.barrier()
++ self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
+ self.comm.Allreduce(sendBuffPhysical,rcvBuffPhysical,op=self.MPI.SUM)
+ self.nSolidInterfaceNodes = rcvBuffHalo[0]
+ self.nSolidInterfacePhysicalNodes = rcvBuffPhysical[0]
+@@ -375,7 +375,7 @@
+ if myid in self.fluidInterfaceProcessors:
+ globalIndexStart = 0
+ for iProc in range(myid):
+- globalIndexStart += self.fluidPhysicalInterfaceNodesDistribution[iProc]
++ globalIndexStart += self.fluidPhysicalInterfaceNodesDistribution[iProc]
+ globalIndexStop = globalIndexStart + self.nLocalFluidInterfacePhysicalNodes-1
+ else:
+ globalIndexStart = 0
+@@ -387,8 +387,8 @@
+ temp[0] = [0,self.nLocalFluidInterfacePhysicalNodes-1]
+ self.fluidGlobalIndexRange = list()
+ self.fluidGlobalIndexRange.append(temp)
+-
+- # Same thing for the solid part
++
++ # Same thing for the solid part
+ if self.have_MPI == True:
+ if myid in self.solidInterfaceProcessors:
+ globalIndexStart = 0
+@@ -404,14 +404,14 @@
+ temp = {}
+ temp[0] = [0,self.nSolidInterfacePhysicalNodes-1]
+ self.solidGlobalIndexRange = list()
+- self.solidGlobalIndexRange.append(temp)
++ self.solidGlobalIndexRange.append(temp)
+
+- self.MPIPrint('Total number of fluid interface nodes (halo nodes included) : {}'.format(self.nFluidInterfaceNodes))
+- self.MPIPrint('Total number of solid interface nodes (halo nodes included) : {}'.format(self.nSolidInterfaceNodes))
++ self.MPIPrint('Total number of fluid interface nodes (halo nodes included) : {}'.format(self.nFluidInterfaceNodes))
++ self.MPIPrint('Total number of solid interface nodes (halo nodes included) : {}'.format(self.nSolidInterfaceNodes))
+ self.MPIPrint('Total number of fluid interface nodes : {}'.format(self.nFluidInterfacePhysicalNodes))
+ self.MPIPrint('Total number of solid interface nodes : {}'.format(self.nSolidInterfacePhysicalNodes))
+
+- self.MPIBarrier()
++ self.MPIBarrier()
+
+ # --- Create all the PETSc vectors required for parallel communication and parallel mesh mapping/interpolation (working for serial too) ---
+ if self.have_MPI == True:
+@@ -432,8 +432,8 @@
+ self.solidInterface_array_DispY.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ self.solidInterface_array_DispZ.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ self.solidInterface_array_DispX.set(0.0)
+- self.solidInterface_array_DispY.set(0.0)
+- self.solidInterface_array_DispZ.set(0.0)
++ self.solidInterface_array_DispY.set(0.0)
++ self.solidInterface_array_DispZ.set(0.0)
+
+ if self.have_MPI == True:
+ self.fluidInterface_array_DispX = PETSc.Vec().create(self.comm)
+@@ -536,30 +536,30 @@
+ self.solidInterfaceResidualnM1_array_Z.set(0.0)
+
+ def interfaceMapping(self,FluidSolver, SolidSolver, FSI_config):
+- """
+- Creates the one-to-one mapping between interfaces in case of matching meshes.
+- Creates the interpolation rules between interfaces in case of non-matching meshes.
+- """
+- if self.have_MPI == True:
++ """
++ Creates the one-to-one mapping between interfaces in case of matching meshes.
++ Creates the interpolation rules between interfaces in case of non-matching meshes.
++ """
++ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+- MPIsize = self.comm.Get_size()
++ MPIsize = self.comm.Get_size()
+ else:
+ myid = 0
+ MPIsize = 1
+
+- # --- Get the fluid interface from fluid solver on each partition ---
+- GlobalIndex = int()
++ # --- Get the fluid interface from fluid solver on each partition ---
++ GlobalIndex = int()
+ localIndex = 0
+ fluidIndexing_temp = {}
+ self.localFluidInterface_array_X_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes))
+ self.localFluidInterface_array_Y_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes))
+ self.localFluidInterface_array_Z_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes))
+ for iVertex in range(self.nLocalFluidInterfaceNodes):
+- GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
+- posx = FluidSolver.GetVertexCoordX(self.fluidInterfaceIdentifier, iVertex)
+- posy = FluidSolver.GetVertexCoordY(self.fluidInterfaceIdentifier, iVertex)
+- posz = FluidSolver.GetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex)
+- if GlobalIndex in self.FluidHaloNodeList[myid].keys():
++ GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
++ posx = FluidSolver.GetVertexCoordX(self.fluidInterfaceIdentifier, iVertex)
++ posy = FluidSolver.GetVertexCoordY(self.fluidInterfaceIdentifier, iVertex)
++ posz = FluidSolver.GetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex)
++ if GlobalIndex in self.FluidHaloNodeList[myid].keys():
+ self.haloNodesPositionsInit[GlobalIndex] = (posx, posy, posz)
+ else:
+ fluidIndexing_temp[GlobalIndex] = self.__getGlobalIndex('fluid', myid, localIndex)
+@@ -576,17 +576,17 @@
+ self.fluidIndexing = fluidIndexing_temp.copy()
+ del fluidIndexing_temp
+
+- # --- Get the solid interface from solid solver on each partition ---
++ # --- Get the solid interface from solid solver on each partition ---
+ localIndex = 0
+ solidIndexing_temp = {}
+- self.localSolidInterface_array_X = np.zeros(self.nLocalSolidInterfaceNodes)
++ self.localSolidInterface_array_X = np.zeros(self.nLocalSolidInterfaceNodes)
+ self.localSolidInterface_array_Y = np.zeros(self.nLocalSolidInterfaceNodes)
+ self.localSolidInterface_array_Z = np.zeros(self.nLocalSolidInterfaceNodes)
+ for iVertex in range(self.nLocalSolidInterfaceNodes):
+ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+- posx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
+- posy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
+- posz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
++ posx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
++ posy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
++ posz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys():
+ pass
+ else:
+@@ -605,14 +605,14 @@
+ del solidIndexing_temp
+
+
+- # --- Create the PETSc parallel interpolation matrix ---
++ # --- Create the PETSc parallel interpolation matrix ---
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ if self.have_MPI == True:
+ self.MappingMatrixA = PETSc.Mat().create(self.comm)
+ self.MappingMatrixB = PETSc.Mat().create(self.comm)
+ self.MappingMatrixA_T = PETSc.Mat().create(self.comm)
+ self.MappingMatrixB_T = PETSc.Mat().create(self.comm)
+- if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
++ if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
+ self.MappingMatrixA.setType('mpiaij')
+ self.MappingMatrixB.setType('mpiaij')
+ self.MappingMatrixA_T.setType('mpiaij')
+@@ -627,7 +627,7 @@
+ self.MappingMatrixB = PETSc.Mat().create()
+ self.MappingMatrixA_T = PETSc.Mat().create()
+ self.MappingMatrixB_T = PETSc.Mat().create()
+- if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
++ if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
+ self.MappingMatrixA.setType('aij')
+ self.MappingMatrixB.setType('aij')
+ self.MappingMatrixA_T.setType('aij')
+@@ -637,16 +637,16 @@
+ self.MappingMatrixB.setType('aij')
+ self.MappingMatrixA_T.setType('aij')
+ self.MappingMatrixB_T.setType('aij')
+- self.MappingMatrixA.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
++ self.MappingMatrixA.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
+ self.MappingMatrixA.setUp()
+ self.MappingMatrixA.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+- self.MappingMatrixB.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes+self.d_RBF))
++ self.MappingMatrixB.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes+self.d_RBF))
+ self.MappingMatrixB.setUp()
+ self.MappingMatrixB.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+- self.MappingMatrixA_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
++ self.MappingMatrixA_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
+ self.MappingMatrixA_T.setUp()
+ self.MappingMatrixA_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+- self.MappingMatrixB_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nFluidInterfacePhysicalNodes))
++ self.MappingMatrixB_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nFluidInterfacePhysicalNodes))
+ self.MappingMatrixB_T.setUp()
+ self.MappingMatrixB_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+ else:
+@@ -660,21 +660,21 @@
+ self.MappingMatrix_T = PETSc.Mat().create()
+ self.MappingMatrix.setType('aij')
+ self.MappingMatrix_T.setType('aij')
+- self.MappingMatrix.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes))
++ self.MappingMatrix.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes))
+ self.MappingMatrix.setUp()
+ self.MappingMatrix.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+- self.MappingMatrix_T.setSizes((self.nSolidInterfacePhysicalNodes, self.nFluidInterfacePhysicalNodes))
++ self.MappingMatrix_T.setSizes((self.nSolidInterfacePhysicalNodes, self.nFluidInterfacePhysicalNodes))
+ self.MappingMatrix_T.setUp()
+ self.MappingMatrix_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+-
+-
++
++
+ # --- Fill the interpolation matrix in parallel (working in serial too) ---
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ self.MPIPrint('Building interpolation matrices...')
+ if self.have_MPI == True:
+ for iProc in self.solidInterfaceProcessors:
+ if myid == iProc:
+- for jProc in self.solidInterfaceProcessors:
++ for jProc in self.solidInterfaceProcessors:
+ self.comm.Send(self.localSolidInterface_array_X, dest=jProc, tag=1)
+ self.comm.Send(self.localSolidInterface_array_Y, dest=jProc, tag=2)
+ self.comm.Send(self.localSolidInterface_array_Z, dest=jProc, tag=3)
+@@ -726,7 +726,7 @@
+ self.TPSMeshMapping_B(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc)
+ else:
+ self.NearestNeighboorMeshMapping(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc)
+- else:
++ else:
+ self.matchingMeshMapping(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc)
+ else:
+ if FSI_config['MATCHING_MESH'] == 'NO':
+@@ -735,10 +735,10 @@
+ elif FSI_config['MESH_INTERP_METHOD'] == 'TPS' :
+ self.TPSMeshMapping_B(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
+ else:
+- self.NearestNeighboorMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
+- else:
++ self.NearestNeighboorMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
++ else:
+ self.matchingMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
+-
++
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ self.MappingMatrixB.assemblyBegin()
+ self.MappingMatrixB.assemblyEnd()
+@@ -751,9 +751,9 @@
+ self.MappingMatrix_T.assemblyBegin()
+ self.MappingMatrix_T.assemblyEnd()
+ self.MPIPrint("Interpolation matrix is built.")
+-
++
+ self.MPIBarrier()
+-
++
+ del self.localSolidInterface_array_X
+ del self.localSolidInterface_array_Y
+ del self.localSolidInterface_array_Z
+@@ -768,20 +768,20 @@
+ myid = 0
+
+ # --- Instantiate the spatial indexing ---
+- prop_index = index.Property()
+- prop_index.dimension = self.nDim
+- SolidSpatialTree = index.Index(properties=prop_index)
+-
++ prop_index = index.Property()
++ prop_index.dimension = self.nDim
++ SolidSpatialTree = index.Index(properties=prop_index)
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for jVertex in range(nSolidNodes):
+ posX = solidInterfaceBuffRcv_X[jVertex]
+ posY = solidInterfaceBuffRcv_Y[jVertex]
+ posZ = solidInterfaceBuffRcv_Z[jVertex]
+- if self.nDim == 2 :
+- SolidSpatialTree.add(jVertex, (posX, posY))
+- else :
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
++ if self.nDim == 2 :
++ SolidSpatialTree.add(jVertex, (posX, posY))
++ else :
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
+
+ if self.nFluidInterfacePhysicalNodes != self.nSolidInterfacePhysicalNodes:
+ raise Exception("Fluid and solid interface must have the same number of nodes for matching meshes ! ")
+@@ -822,20 +822,20 @@
+ myid = 0
+
+ # --- Instantiate the spatial indexing ---
+- prop_index = index.Property()
+- prop_index.dimension = self.nDim
+- SolidSpatialTree = index.Index(properties=prop_index)
+-
++ prop_index = index.Property()
++ prop_index.dimension = self.nDim
++ SolidSpatialTree = index.Index(properties=prop_index)
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for jVertex in range(nSolidNodes):
+ posX = solidInterfaceBuffRcv_X[jVertex]
+ posY = solidInterfaceBuffRcv_Y[jVertex]
+ posZ = solidInterfaceBuffRcv_Z[jVertex]
+- if self.nDim == 2 :
+- SolidSpatialTree.add(jVertex, (posX, posY))
+- else :
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
++ if self.nDim == 2 :
++ SolidSpatialTree.add(jVertex, (posX, posY))
++ else :
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
+
+ # --- For each fluid interface node, find the nearest solid interface node and fill the boolean mapping matrix ---
+ for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes):
+@@ -863,20 +863,20 @@
+ myid = 0
+
+ # --- Instantiate the spatial indexing ---
+- prop_index = index.Property()
+- prop_index.dimension = self.nDim
+- SolidSpatialTree = index.Index(properties=prop_index)
+-
++ prop_index = index.Property()
++ prop_index.dimension = self.nDim
++ SolidSpatialTree = index.Index(properties=prop_index)
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for jVertex in range(nSolidNodes):
+ posX = solidInterfaceBuffRcv_X[jVertex]
+ posY = solidInterfaceBuffRcv_Y[jVertex]
+ posZ = solidInterfaceBuffRcv_Z[jVertex]
+- if self.nDim == 2 :
+- SolidSpatialTree.add(jVertex, (posX, posY))
+- else :
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
++ if self.nDim == 2 :
++ SolidSpatialTree.add(jVertex, (posX, posY))
++ else :
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
+
+ for iVertexSolid in range(self.nLocalSolidInterfaceNodes):
+ posX = self.localSolidInterface_array_X[iVertexSolid]
+@@ -915,20 +915,20 @@
+ myid = 0
+
+ # --- Instantiate the spatial indexing ---
+- prop_index = index.Property()
+- prop_index.dimension = self.nDim
+- SolidSpatialTree = index.Index(properties=prop_index)
+-
++ prop_index = index.Property()
++ prop_index.dimension = self.nDim
++ SolidSpatialTree = index.Index(properties=prop_index)
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for jVertex in range(nSolidNodes):
+ posX = solidInterfaceBuffRcv_X[jVertex]
+ posY = solidInterfaceBuffRcv_Y[jVertex]
+ posZ = solidInterfaceBuffRcv_Z[jVertex]
+- if self.nDim == 2 :
+- SolidSpatialTree.add(jVertex, (posX, posY))
+- else :
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
++ if self.nDim == 2 :
++ SolidSpatialTree.add(jVertex, (posX, posY))
++ else :
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
+
+ for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes):
+ posX = self.localFluidInterface_array_X_init[iVertexFluid]
+@@ -965,7 +965,7 @@
+ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for iVertexSolid in range(self.nLocalSolidInterfaceNodes):
+@@ -999,7 +999,7 @@
+ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes):
+@@ -1031,7 +1031,7 @@
+ """
+ phi = 0.0
+ eps = distance/rad
+-
++
+ if eps < 1:
+ phi = ((1.0-eps)**4)*(4.0*eps+1.0)
+ else:
+@@ -1044,20 +1044,20 @@
+ Description
+ """
+ phi = 0.0
+-
++
+ if distance > 0.0:
+ phi = (distance**2)*np.log10(distance)
+ else:
+ phi = 0.0
+
+- return phi
++ return phi
+
+
+ def interpolateSolidPositionOnFluidMesh(self, FSI_config):
+- """
+- Applies the one-to-one mapping or the interpolaiton rules from solid to fluid mesh.
+- """
+- if self.have_MPI == True:
++ """
++ Applies the one-to-one mapping or the interpolaiton rules from solid to fluid mesh.
++ """
++ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+ MPIsize = self.comm.Get_size()
+ else:
+@@ -1110,12 +1110,12 @@
+ del gamma_array_DispY
+ del gamma_array_DispZ
+ del KSP_solver
+- else:
++ else:
+ self.MappingMatrix.mult(self.solidInterface_array_DispX, self.fluidInterface_array_DispX)
+ self.MappingMatrix.mult(self.solidInterface_array_DispY, self.fluidInterface_array_DispY)
+ self.MappingMatrix.mult(self.solidInterface_array_DispZ, self.fluidInterface_array_DispZ)
+
+- # --- Checking conservation ---
++ # --- Checking conservation ---
+ WSX = self.solidLoads_array_X.dot(self.solidInterface_array_DispX)
+ WSY = self.solidLoads_array_Y.dot(self.solidInterface_array_DispY)
+ WSZ = self.solidLoads_array_Z.dot(self.solidInterface_array_DispZ)
+@@ -1124,11 +1124,11 @@
+ WFY = self.fluidLoads_array_Y.dot(self.fluidInterface_array_DispY)
+ WFZ = self.fluidLoads_array_Z.dot(self.fluidInterface_array_DispZ)
+
+- self.MPIPrint("Checking f/s interface conservation...")
+- self.MPIPrint('Solid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WSX, WSY, WSZ))
+- self.MPIPrint('Fluid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WFX, WFY, WFZ))
++ self.MPIPrint("Checking f/s interface conservation...")
++ self.MPIPrint('Solid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WSX, WSY, WSZ))
++ self.MPIPrint('Fluid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WFX, WFY, WFZ))
++
+
+-
+ # --- Redistribute the interpolated fluid interface according to the partitions that own the fluid interface ---
+ # Gather the fluid interface on the master process
+ if self.have_MPI == True:
+@@ -1156,7 +1156,7 @@
+ displ = tuple(displ)
+
+ del sendBuffNumber, rcvBuffNumber
+-
++
+ #print("DEBUG MESSAGE From proc {}, counts = {}".format(myid, counts))
+ #print("DEBUG MESSAGE From proc {}, displ = {}".format(myid, displ))
+
+@@ -1213,18 +1213,18 @@
+ del sendBuff
+
+ def interpolateFluidLoadsOnSolidMesh(self, FSI_config):
+- """
+- Applies the one-to-one mapping or the interpolaiton rules from fluid to solid mesh.
+- """
+- if self.have_MPI == True:
++ """
++ Applies the one-to-one mapping or the interpolaiton rules from fluid to solid mesh.
++ """
++ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+ MPIsize = self.comm.Get_size()
+ else:
+ myid = 0
+ MPIsize = 1
+-
++
+ # --- Interpolate (or map) in parallel the fluid interface loads on the solid interface ---
+- #self.MappingMatrix.transpose()
++ #self.MappingMatrix.transpose()
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ if self.have_MPI == True:
+ gamma_array_LoadX = PETSc.Vec().create(self.comm)
+@@ -1280,10 +1280,10 @@
+ self.solidLoads_array_X_recon = None
+ self.solidLoads_array_Y_recon = None
+ self.solidLoads_array_Z_recon = None
+- if myid == self.rootProcess:
+- self.solidLoads_array_X_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+- self.solidLoads_array_Y_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+- self.solidLoads_array_Z_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
++ if myid == self.rootProcess:
++ self.solidLoads_array_X_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
++ self.solidLoads_array_Y_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
++ self.solidLoads_array_Z_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ myNumberOfNodes = self.solidLoads_array_X.getArray().shape[0]
+ sendBuffNumber = np.array([myNumberOfNodes], dtype=int)
+ rcvBuffNumber = np.zeros(MPIsize, dtype=int)
+@@ -1293,9 +1293,9 @@
+ displ = np.zeros(MPIsize, dtype=int)
+ for ii in range(rcvBuffNumber.shape[0]):
+ displ[ii] = rcvBuffNumber[0:ii].sum()
+- displ = tuple(displ)
++ displ = tuple(displ)
+
+- del sendBuffNumber, rcvBuffNumber
++ del sendBuffNumber, rcvBuffNumber
+
+ self.comm.Gatherv(self.solidLoads_array_X.getArray(), [self.solidLoads_array_X_recon, counts, displ, self.MPI.DOUBLE], root=self.rootProcess)
+ self.comm.Gatherv(self.solidLoads_array_Y.getArray(), [self.solidLoads_array_Y_recon, counts, displ, self.MPI.DOUBLE], root=self.rootProcess)
+@@ -1336,25 +1336,25 @@
+
+
+ '''def getSolidInterfacePosition(self, SolidSolver):
+- """
+- Gets the current solid interface position from the solid solver.
+- """
++ """
++ Gets the current solid interface position from the solid solver.
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ # --- Get the solid interface position from the solid solver and directly fill the corresponding PETSc vector ---
+ GlobalIndex = int()
+ localIndex = 0
+- for iVertex in range(self.nLocalSolidInterfaceNodes):
++ for iVertex in range(self.nLocalSolidInterfaceNodes):
+ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys():
+ pass
+ else:
+- newPosx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
+- newPosy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
+- newPosz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
++ newPosx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
++ newPosy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
++ newPosz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex)
+ self.solidInterface_array_X.setValues([iGlobalVertex],newPosx)
+ self.solidInterface_array_Y.setValues([iGlobalVertex],newPosy)
+@@ -1375,25 +1375,25 @@
+ #print("DEBUG MESSAGE From PROC {} : array_X = {}".format(myid, self.solidInterface_array_X.getArray()))'''
+
+ def getSolidInterfaceDisplacement(self, SolidSolver):
+- """
+- Gets the current solid interface position from the solid solver.
+- """
++ """
++ Gets the current solid interface position from the solid solver.
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ # --- Get the solid interface position from the solid solver and directly fill the corresponding PETSc vector ---
+ GlobalIndex = int()
+ localIndex = 0
+- for iVertex in range(self.nLocalSolidInterfaceNodes):
++ for iVertex in range(self.nLocalSolidInterfaceNodes):
+ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys():
+ pass
+ else:
+- newDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
+- newDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
+- newDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
++ newDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
++ newDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
++ newDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex)
+ self.solidInterface_array_DispX.setValues([iGlobalVertex],newDispx)
+ self.solidInterface_array_DispY.setValues([iGlobalVertex],newDispy)
+@@ -1408,9 +1408,9 @@
+ self.solidInterface_array_DispZ.assemblyEnd()
+
+ def getFluidInterfaceNodalForce(self, FSI_config, FluidSolver):
+- """
+- Gets the fluid interface loads from the fluid solver.
+- """
++ """
++ Gets the fluid interface loads from the fluid solver.
++ """
+ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+ else:
+@@ -1422,17 +1422,17 @@
+ FZ = 0.0
+
+ # --- Get the fluid interface loads from the fluid solver and directly fill the corresponding PETSc vector ---
+- for iVertex in range(self.nLocalFluidInterfaceNodes):
+- halo = FluidSolver.ComputeVertexForces(self.fluidInterfaceIdentifier, iVertex) # !!we have to ignore halo node coming from mesh partitioning because they introduice non-physical forces
+- if halo==False:
+- if FSI_config['CSD_SOLVER'] == 'GETDP':
+- newFx = FluidSolver.GetVertexForceDensityX(self.fluidInterfaceIdentifier, iVertex)
+- newFy = FluidSolver.GetVertexForceDensityY(self.fluidInterfaceIdentifier, iVertex)
+- newFz = FluidSolver.GetVertexForceDensityZ(self.fluidInterfaceIdentifier, iVertex)
+- else:
+- newFx = FluidSolver.GetVertexForceX(self.fluidInterfaceIdentifier, iVertex)
+- newFy = FluidSolver.GetVertexForceY(self.fluidInterfaceIdentifier, iVertex)
+- newFz = FluidSolver.GetVertexForceZ(self.fluidInterfaceIdentifier, iVertex)
++ for iVertex in range(self.nLocalFluidInterfaceNodes):
++ halo = FluidSolver.ComputeVertexForces(self.fluidInterfaceIdentifier, iVertex) # !!we have to ignore halo node coming from mesh partitioning because they introduice non-physical forces
++ if halo==False:
++ if FSI_config['CSD_SOLVER'] == 'GETDP':
++ newFx = FluidSolver.GetVertexForceDensityX(self.fluidInterfaceIdentifier, iVertex)
++ newFy = FluidSolver.GetVertexForceDensityY(self.fluidInterfaceIdentifier, iVertex)
++ newFz = FluidSolver.GetVertexForceDensityZ(self.fluidInterfaceIdentifier, iVertex)
++ else:
++ newFx = FluidSolver.GetVertexForceX(self.fluidInterfaceIdentifier, iVertex)
++ newFy = FluidSolver.GetVertexForceY(self.fluidInterfaceIdentifier, iVertex)
++ newFz = FluidSolver.GetVertexForceZ(self.fluidInterfaceIdentifier, iVertex)
+ iGlobalVertex = self.__getGlobalIndex('fluid', myid, localIndex)
+ self.fluidLoads_array_X.setValues([iGlobalVertex], newFx)
+ self.fluidLoads_array_Y.setValues([iGlobalVertex], newFy)
+@@ -1457,22 +1457,22 @@
+ FX_b = self.fluidLoads_array_X.sum()
+ FY_b = self.fluidLoads_array_Y.sum()
+ FZ_b = self.fluidLoads_array_Z.sum()
+-
++
+
+ def setFluidInterfaceVarCoord(self, FluidSolver):
+- """
+- Communicate the change of coordinates of the fluid interface to the fluid solver.
+- Prepare the fluid solver for mesh deformation.
+- """
++ """
++ Communicate the change of coordinates of the fluid interface to the fluid solver.
++ Prepare the fluid solver for mesh deformation.
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ # --- Send the new fluid interface position to the fluid solver (on each partition, halo nodes included) ---
+ localIndex = 0
+- for iVertex in range(self.nLocalFluidInterfaceNodes):
+- GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
++ for iVertex in range(self.nLocalFluidInterfaceNodes):
++ GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.FluidHaloNodeList[myid].keys():
+ posX0, posY0, posZ0 = self.haloNodesPositionsInit[GlobalIndex]
+ DispX, DispY, DispZ = self.haloNodesDisplacements[GlobalIndex]
+@@ -1491,32 +1491,32 @@
+ FluidSolver.SetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex, posZ)
+ localIndex += 1
+ # Prepares the mesh deformation in the fluid solver
+- nodalVarCoordNorm = FluidSolver.SetVertexVarCoord(self.fluidInterfaceIdentifier, iVertex)
++ nodalVarCoordNorm = FluidSolver.SetVertexVarCoord(self.fluidInterfaceIdentifier, iVertex)
++
+
+-
+ def setSolidInterfaceLoads(self, SolidSolver, FSI_config, time):
+- """
+- Communicates the new solid interface loads to the solid solver.
+- In case of rigid body motion, calculates the new resultant forces (lift, drag, ...).
+- """
++ """
++ Communicates the new solid interface loads to the solid solver.
++ In case of rigid body motion, calculates the new resultant forces (lift, drag, ...).
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+
+- FY = 0.0 # solid-side resultant forces
++ FY = 0.0 # solid-side resultant forces
+ FX = 0.0
+ FZ = 0.0
+- FFX = 0.0 # fluid-side resultant forces
+- FFY = 0.0
+- FFZ = 0.0
++ FFX = 0.0 # fluid-side resultant forces
++ FFY = 0.0
++ FFZ = 0.0
+
+ # --- Check for total force conservation after interpolation
+ FFX = self.fluidLoads_array_X.sum()
+ FFY = self.fluidLoads_array_Y.sum()
+ FFZ = self.fluidLoads_array_Z.sum()
+
+-
++
+ for iVertex in range(self.nLocalSolidInterfaceNodes):
+ FX += self.localSolidLoads_array_X[iVertex]
+ FY += self.localSolidLoads_array_Y[iVertex]
+@@ -1527,9 +1527,9 @@
+ FY = self.comm.allreduce(FY)
+ FZ = self.comm.allreduce(FZ)
+
+- self.MPIPrint("Checking f/s interface total force...")
+- self.MPIPrint('Solid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FX, FY, FZ))
+- self.MPIPrint('Fluid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FFX, FFY, FFZ))
++ self.MPIPrint("Checking f/s interface total force...")
++ self.MPIPrint('Solid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FX, FY, FZ))
++ self.MPIPrint('Fluid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FFX, FFY, FFZ))
+
+ # --- Send the new solid interface loads to the solid solver (on each partition, halo nodes included) ---
+ GlobalIndex = int()
+@@ -1541,25 +1541,25 @@
+ pass
+ else:
+ Fx = self.localSolidLoads_array_X[localIndex]
+- Fy = self.localSolidLoads_array_Y[localIndex]
+- Fz = self.localSolidLoads_array_Z[localIndex]
++ Fy = self.localSolidLoads_array_Y[localIndex]
++ Fz = self.localSolidLoads_array_Z[localIndex]
+ SolidSolver.applyload(iVertex, Fx, Fy, Fz, time)
+ localIndex += 1
+- if FSI_config['CSD_SOLVER'] == 'NATIVE':
++ if FSI_config['CSD_SOLVER'] == 'NATIVE':
+ SolidSolver.setGeneralisedForce()
+- SolidSolver.setGeneralisedMoment()
++ SolidSolver.setGeneralisedMoment()
+
+ def computeSolidInterfaceResidual(self, SolidSolver):
+- """
+- Computes the solid interface FSI displacement residual.
+- """
++ """
++ Computes the solid interface FSI displacement residual.
++ """
+
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+
+- normInterfaceResidualSquare = 0.0
++ normInterfaceResidualSquare = 0.0
+
+ # --- Create and fill the PETSc vector for the predicted solid interface position (predicted by the solid computation) ---
+ if self.have_MPI == True:
+@@ -1575,27 +1575,27 @@
+ predDisp_array_Y = PETSc.Vec().create()
+ predDisp_array_Y.setType('seq')
+ predDisp_array_Z = PETSc.Vec().create()
+- predDisp_array_Z.setType('seq')
++ predDisp_array_Z.setType('seq')
+ predDisp_array_X.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ predDisp_array_Y.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ predDisp_array_Z.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+-
+- if myid in self.solidSolverProcessors:
+- for iVertex in range(self.nLocalSolidInterfaceNodes):
+- predDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
+- predDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
+- predDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
++
++ if myid in self.solidSolverProcessors:
++ for iVertex in range(self.nLocalSolidInterfaceNodes):
++ predDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
++ predDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
++ predDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, iVertex)
+ predDisp_array_X.setValues([iGlobalVertex], predDispx)
+ predDisp_array_Y.setValues([iGlobalVertex], predDispy)
+ predDisp_array_Z.setValues([iGlobalVertex], predDispz)
+-
+- predDisp_array_X.assemblyBegin()
+- predDisp_array_X.assemblyEnd()
+- predDisp_array_Y.assemblyBegin()
+- predDisp_array_Y.assemblyEnd()
+- predDisp_array_Z.assemblyBegin()
+- predDisp_array_Z.assemblyEnd()
++
++ predDisp_array_X.assemblyBegin()
++ predDisp_array_X.assemblyEnd()
++ predDisp_array_Y.assemblyBegin()
++ predDisp_array_Y.assemblyEnd()
++ predDisp_array_Z.assemblyBegin()
++ predDisp_array_Z.assemblyEnd()
+
+ # --- Calculate the residual (vector and norm) ---
+ self.solidInterfaceResidual_array_X = predDisp_array_X - self.solidInterface_array_DispX
+@@ -1615,45 +1615,45 @@
+ del predDisp_array_Y
+ del predDisp_array_Z
+
+- return sqrt(normInterfaceResidualSquare)
++ return sqrt(normInterfaceResidualSquare)
+
+ def relaxSolidPosition(self,FSI_config):
+- """
+- Apply solid displacement under-relaxation.
+- """
++ """
++ Apply solid displacement under-relaxation.
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+
+ # --- Set the Aitken coefficient for the relaxation ---
+- if FSI_config['AITKEN_RELAX'] == 'STATIC':
+- self.aitkenParam = FSI_config['AITKEN_PARAM']
+- elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
+- self.setAitkenCoefficient(FSI_config)
+- else:
+- self.aitkenParam = 1.0
++ if FSI_config['AITKEN_RELAX'] == 'STATIC':
++ self.aitkenParam = FSI_config['AITKEN_PARAM']
++ elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
++ self.setAitkenCoefficient(FSI_config)
++ else:
++ self.aitkenParam = 1.0
+
+- self.MPIPrint('Aitken under-relaxation step with parameter {}'.format(self.aitkenParam))
++ self.MPIPrint('Aitken under-relaxation step with parameter {}'.format(self.aitkenParam))
+
+ # --- Relax the solid interface position ---
+ self.solidInterface_array_DispX += self.aitkenParam*self.solidInterfaceResidual_array_X
+ self.solidInterface_array_DispY += self.aitkenParam*self.solidInterfaceResidual_array_Y
+ self.solidInterface_array_DispZ += self.aitkenParam*self.solidInterfaceResidual_array_Z
+-
++
+
+ def setAitkenCoefficient(self, FSI_config):
+- """
+- Computes the Aitken coefficients for solid displacement under-relaxation.
+- """
+-
+- deltaResNormSquare = 0.0
+- prodScalRes = 0.0
+-
++ """
++ Computes the Aitken coefficients for solid displacement under-relaxation.
++ """
++
++ deltaResNormSquare = 0.0
++ prodScalRes = 0.0
++
+ # --- Create the PETSc vector for the difference between the residuals (current and previous FSI iter) ---
+- if self.FSIIter == 0:
+- self.aitkenParam = max(FSI_config['AITKEN_PARAM'], self.aitkenParam)
+- else:
++ if self.FSIIter == 0:
++ self.aitkenParam = max(FSI_config['AITKEN_PARAM'], self.aitkenParam)
++ else:
+ if self.have_MPI:
+ deltaResx_array_X = PETSc.Vec().create(self.comm)
+ deltaResx_array_X.setType('mpi')
+@@ -1688,9 +1688,9 @@
+ deltaResNormSquare_X = (deltaResx_array_X.norm())**2
+ deltaResNormSquare_Y = (deltaResx_array_Y.norm())**2
+ deltaResNormSquare_Z = (deltaResx_array_Z.norm())**2
+- deltaResNormSquare = deltaResNormSquare_X + deltaResNormSquare_Y + deltaResNormSquare_Z
++ deltaResNormSquare = deltaResNormSquare_X + deltaResNormSquare_Y + deltaResNormSquare_Z
+
+- self.aitkenParam *= -prodScalRes/deltaResNormSquare
++ self.aitkenParam *= -prodScalRes/deltaResNormSquare
+
+ deltaResx_array_X.destroy()
+ deltaResx_array_Y.destroy()
+@@ -1708,27 +1708,27 @@
+ self.solidInterfaceResidual_array_Z.copy(self.solidInterfaceResidualnM1_array_Z)
+
+ def displacementPredictor(self, FSI_config , SolidSolver, deltaT):
+- """
+- Calculates a prediciton for the solid interface position for the next time step.
+- """
++ """
++        Calculates a prediction for the solid interface position for the next time step.
++ """
+
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+
+- if FSI_config['DISP_PRED'] == 'FIRST_ORDER':
+- self.MPIPrint("First order predictor")
+- alpha_0 = 1.0
+- alpha_1 = 0.0
+- elif FSI_config['DISP_PRED'] == 'SECOND_ORDER':
+- self.MPIPrint("Second order predictor")
+- alpha_0 = 1.0
+- alpha_1 = 0.5
+- else:
+- self.MPIPrint("No predictor")
+- alpha_0 = 0.0
+- alpha_1 = 0.0
++ if FSI_config['DISP_PRED'] == 'FIRST_ORDER':
++ self.MPIPrint("First order predictor")
++ alpha_0 = 1.0
++ alpha_1 = 0.0
++ elif FSI_config['DISP_PRED'] == 'SECOND_ORDER':
++ self.MPIPrint("Second order predictor")
++ alpha_0 = 1.0
++ alpha_1 = 0.5
++ else:
++ self.MPIPrint("No predictor")
++ alpha_0 = 0.0
++ alpha_1 = 0.0
+
+ # --- Create the PETSc vectors to store the solid interface velocity ---
+ if self.have_MPI == True:
+@@ -1774,18 +1774,18 @@
+ # --- Fill the PETSc vectors ---
+ GlobalIndex = int()
+ localIndex = 0
+- for iVertex in range(self.nLocalSolidInterfaceNodes):
+- GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
++ for iVertex in range(self.nLocalSolidInterfaceNodes):
++ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys():
+ pass
+ else:
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex)
+- velx = SolidSolver.getInterfaceNodeVelX(self.solidInterfaceIdentifier, iVertex)
+- vely = SolidSolver.getInterfaceNodeVelY(self.solidInterfaceIdentifier, iVertex)
+- velz = SolidSolver.getInterfaceNodeVelZ(self.solidInterfaceIdentifier, iVertex)
+- velxNm1 = SolidSolver.getInterfaceNodeVelXNm1(self.solidInterfaceIdentifier, iVertex)
+- velyNm1 = SolidSolver.getInterfaceNodeVelYNm1(self.solidInterfaceIdentifier, iVertex)
+- velzNm1 = SolidSolver.getInterfaceNodeVelZNm1(self.solidInterfaceIdentifier, iVertex)
++ velx = SolidSolver.getInterfaceNodeVelX(self.solidInterfaceIdentifier, iVertex)
++ vely = SolidSolver.getInterfaceNodeVelY(self.solidInterfaceIdentifier, iVertex)
++ velz = SolidSolver.getInterfaceNodeVelZ(self.solidInterfaceIdentifier, iVertex)
++ velxNm1 = SolidSolver.getInterfaceNodeVelXNm1(self.solidInterfaceIdentifier, iVertex)
++ velyNm1 = SolidSolver.getInterfaceNodeVelYNm1(self.solidInterfaceIdentifier, iVertex)
++ velzNm1 = SolidSolver.getInterfaceNodeVelZNm1(self.solidInterfaceIdentifier, iVertex)
+ Vel_array_X.setValues([iGlobalVertex],velx)
+ Vel_array_Y.setValues([iGlobalVertex],vely)
+ Vel_array_Z.setValues([iGlobalVertex],velz)
+@@ -1822,27 +1822,27 @@
+ del VelnM1_array_X, VelnM1_array_Y, VelnM1_array_Z
+
+ def writeFSIHistory(self, TimeIter, time, varCoordNorm, FSIConv):
+- """
+- Write the FSI history file of the computaion.
+- """
++ """
++        Write the FSI history file of the computation.
++ """
+
+ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ if myid == self.rootProcess:
+- if self.unsteady:
+- if TimeIter == 0:
+- histFile = open('FSIhistory.dat', "w")
++ if self.unsteady:
++ if TimeIter == 0:
++ histFile = open('FSIhistory.dat', "w")
+ histFile.write("TimeIter\tTime\tFSIRes\tFSINbIter\n")
+- else:
+- histFile = open('FSIhistory.dat', "a")
+- if FSIConv:
+- histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter+1) + '\n')
+- else:
+- histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter) + '\n')
+- histFile.close()
++ else:
++ histFile = open('FSIhistory.dat', "a")
++ if FSIConv:
++ histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter+1) + '\n')
++ else:
++ histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter) + '\n')
++ histFile.close()
+ else:
+ if self.FSIIter == 0:
+ histFile = open('FSIhistory.dat', "w")
+@@ -1851,7 +1851,7 @@
+ histFile = open('FSIhistory.dat', "a")
+ histFile.write(str(self.FSIIter) + '\t' + str(varCoordNorm) + '\n')
+ histFile.close()
+-
++
+
+ self.MPIBarrier()
+
+@@ -1868,254 +1868,254 @@
+ globalIndex = globalStartIndex + iLocalVertex
+
+ return globalIndex
+-
++
+
+ def UnsteadyFSI(self,FSI_config, FluidSolver, SolidSolver):
+- """
+- Run the unsteady FSI computation by synchronizing the fluid and solid solvers.
+- F/s interface data are exchanged through interface mapping and interpolation (if non mathcing meshes).
+- """
++ """
++ Run the unsteady FSI computation by synchronizing the fluid and solid solvers.
++        F/s interface data are exchanged through interface mapping and interpolation (for non-matching meshes).
++ """
+
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
+- numberPart = self.comm.Get_size()
++ myid = self.comm.Get_rank()
++ numberPart = self.comm.Get_size()
+ else:
+ myid = 0
+ numberPart = 1
+
+- # --- Set some general variables for the unsteady computation --- #
+- deltaT = FSI_config['UNST_TIMESTEP'] # physical time step
+- totTime = FSI_config['UNST_TIME'] # physical simulation time
+- NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
+- FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
+- TimeIterTreshold = 0 # time iteration from which we allow the solid to deform
+-
+- if FSI_config['RESTART_SOL'] == 'YES':
+- startTime = FSI_config['START_TIME']
+- NbTimeIter = ((totTime)/deltaT)-1
+- time = startTime
+- TimeIter = FSI_config['RESTART_ITER']
+- else:
+- NbTimeIter = (totTime/deltaT)-1 # number of time iterations
+- time = 0.0 # initial time
+- TimeIter = 0 # initial time iteration
+-
+- NbTimeIter = int(NbTimeIter) # be sure that NbTimeIter is an integer
+-
+- varCoordNorm = 0.0 # FSI residual
+- FSIConv = False # FSI convergence flag
+-
+- self.MPIPrint('\n**********************************')
+- self.MPIPrint('* Begin unsteady FSI computation *')
+- self.MPIPrint('**********************************\n')
+-
+- # --- Initialize the coupled solution --- #
+- #If restart (DOES NOT WORK YET)
+- if FSI_config['RESTART_SOL'] == 'YES':
+- TimeIterTreshold = -1
+- FluidSolver.setTemporalIteration(TimeIter)
+- if myid == self.rootProcess:
+- SolidSolver.outputDisplacements(FluidSolver.getInterRigidDispArray(), True)
++ # --- Set some general variables for the unsteady computation --- #
++ deltaT = FSI_config['UNST_TIMESTEP'] # physical time step
++ totTime = FSI_config['UNST_TIME'] # physical simulation time
++ NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
++ FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
++ TimeIterTreshold = 0 # time iteration from which we allow the solid to deform
++
++ if FSI_config['RESTART_SOL'] == 'YES':
++ startTime = FSI_config['START_TIME']
++ NbTimeIter = ((totTime)/deltaT)-1
++ time = startTime
++ TimeIter = FSI_config['RESTART_ITER']
++ else:
++ NbTimeIter = (totTime/deltaT)-1 # number of time iterations
++ time = 0.0 # initial time
++ TimeIter = 0 # initial time iteration
++
++ NbTimeIter = int(NbTimeIter) # be sure that NbTimeIter is an integer
++
++ varCoordNorm = 0.0 # FSI residual
++ FSIConv = False # FSI convergence flag
++
++ self.MPIPrint('\n**********************************')
++ self.MPIPrint('* Begin unsteady FSI computation *')
++ self.MPIPrint('**********************************\n')
++
++ # --- Initialize the coupled solution --- #
++ #If restart (DOES NOT WORK YET)
++ if FSI_config['RESTART_SOL'] == 'YES':
++ TimeIterTreshold = -1
++ FluidSolver.setTemporalIteration(TimeIter)
++ if myid == self.rootProcess:
++ SolidSolver.outputDisplacements(FluidSolver.getInterRigidDispArray(), True)
++ if self.have_MPI == True:
++ self.comm.barrier()
++ FluidSolver.setInitialMesh(True)
++ if myid == self.rootProcess:
++ SolidSolver.displacementPredictor(FluidSolver.getInterRigidDispArray())
+ if self.have_MPI == True:
+- self.comm.barrier()
+- FluidSolver.setInitialMesh(True)
+- if myid == self.rootProcess:
+- SolidSolver.displacementPredictor(FluidSolver.getInterRigidDispArray())
+- if self.have_MPI == True:
+- self.comm.barrier()
+- if myid == self.rootProcess:
+- SolidSolver.updateSolution()
+- #If no restart
+- else:
+- self.MPIPrint('Setting FSI initial conditions')
++ self.comm.barrier()
++ if myid == self.rootProcess:
++ SolidSolver.updateSolution()
++ #If no restart
++ else:
++ self.MPIPrint('Setting FSI initial conditions')
+ if myid in self.solidSolverProcessors:
+- SolidSolver.setInitialDisplacements()
++ SolidSolver.setInitialDisplacements()
+ self.getSolidInterfaceDisplacement(SolidSolver)
+- self.interpolateSolidPositionOnFluidMesh(FSI_config)
+- self.setFluidInterfaceVarCoord(FluidSolver)
+- FluidSolver.SetInitialMesh() # if there is an initial deformation in the solid, it has to be communicated to the fluid solver
+- self.MPIPrint('\nFSI initial conditions are set')
+- self.MPIPrint('Beginning time integration\n')
+-
+- # --- External temporal loop --- #
+- while TimeIter <= NbTimeIter:
+-
+- if TimeIter > TimeIterTreshold:
+- NbFSIIter = NbFSIIterMax
+- self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI on time iteration {} ***************'.format(TimeIter))
+- else:
+- NbFSIIter = 1
+-
+- self.FSIIter = 0
+- FSIConv = False
+- FluidSolver.PreprocessExtIter(TimeIter) # set some parameters before temporal fluid iteration
+-
+- # --- Internal FSI loop --- #
+- while self.FSIIter <= (NbFSIIter-1):
++ self.interpolateSolidPositionOnFluidMesh(FSI_config)
++ self.setFluidInterfaceVarCoord(FluidSolver)
++ FluidSolver.SetInitialMesh() # if there is an initial deformation in the solid, it has to be communicated to the fluid solver
++ self.MPIPrint('\nFSI initial conditions are set')
++ self.MPIPrint('Beginning time integration\n')
++
++ # --- External temporal loop --- #
++ while TimeIter <= NbTimeIter:
++
++ if TimeIter > TimeIterTreshold:
++ NbFSIIter = NbFSIIterMax
++ self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI on time iteration {} ***************'.format(TimeIter))
++ else:
++ NbFSIIter = 1
++
++ self.FSIIter = 0
++ FSIConv = False
++ FluidSolver.PreprocessExtIter(TimeIter) # set some parameters before temporal fluid iteration
+
+- self.MPIPrint("\n>>>> Time iteration {} / FSI iteration {} <<<<".format(TimeIter,self.FSIIter))
++ # --- Internal FSI loop --- #
++ while self.FSIIter <= (NbFSIIter-1):
+
+- # --- Mesh morphing step (displacements interpolation, displacements communication, and mesh morpher call) --- #
+- self.interpolateSolidPositionOnFluidMesh(FSI_config)
++ self.MPIPrint("\n>>>> Time iteration {} / FSI iteration {} <<<<".format(TimeIter,self.FSIIter))
++
++ # --- Mesh morphing step (displacements interpolation, displacements communication, and mesh morpher call) --- #
++ self.interpolateSolidPositionOnFluidMesh(FSI_config)
+ self.MPIPrint('\nPerforming dynamic mesh deformation (ALE)...\n')
+ self.setFluidInterfaceVarCoord(FluidSolver)
+ FluidSolver.DynamicMeshUpdate(TimeIter)
+-
+- # --- Fluid solver call for FSI subiteration --- #
+- self.MPIPrint('\nLaunching fluid solver for one single dual-time iteration...')
++
++ # --- Fluid solver call for FSI subiteration --- #
++ self.MPIPrint('\nLaunching fluid solver for one single dual-time iteration...')
+ self.MPIBarrier()
+- FluidSolver.ResetConvergence()
+- FluidSolver.Run()
++ FluidSolver.ResetConvergence()
++ FluidSolver.Run()
+ self.MPIBarrier()
+
+- # --- Surface fluid loads interpolation and communication --- #
+- self.MPIPrint('\nProcessing interface fluid loads...\n')
++ # --- Surface fluid loads interpolation and communication --- #
++ self.MPIPrint('\nProcessing interface fluid loads...\n')
+ self.MPIBarrier()
+- self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
++ self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
+ self.MPIBarrier()
+- if TimeIter > TimeIterTreshold:
+- self.interpolateFluidLoadsOnSolidMesh(FSI_config)
+- self.setSolidInterfaceLoads(SolidSolver, FSI_config, time)
++ if TimeIter > TimeIterTreshold:
++ self.interpolateFluidLoadsOnSolidMesh(FSI_config)
++ self.setSolidInterfaceLoads(SolidSolver, FSI_config, time)
+
+- # --- Solid solver call for FSI subiteration --- #
+- self.MPIPrint('\nLaunching solid solver for a single time iteration...\n')
++ # --- Solid solver call for FSI subiteration --- #
++ self.MPIPrint('\nLaunching solid solver for a single time iteration...\n')
+ if myid in self.solidSolverProcessors:
+- if FSI_config['CSD_SOLVER'] == 'NATIVE':
+- SolidSolver.timeIteration(time)
+- elif FSI_config['CSD_SOLVER'] == 'METAFOR' or FSI_config['CSD_SOLVER'] == 'GETDP' or FSI_config['CSD_SOLVER'] == 'TESTER':
+- SolidSolver.run(time-deltaT, time)
+-
+- # --- Compute and monitor the FSI residual --- #
+- varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
+- self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
+- if varCoordNorm < FSITolerance:
+- FSIConv = True
+- break
++ if FSI_config['CSD_SOLVER'] == 'NATIVE':
++ SolidSolver.timeIteration(time)
++ elif FSI_config['CSD_SOLVER'] == 'METAFOR' or FSI_config['CSD_SOLVER'] == 'GETDP' or FSI_config['CSD_SOLVER'] == 'TESTER':
++ SolidSolver.run(time-deltaT, time)
++
++ # --- Compute and monitor the FSI residual --- #
++ varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
++ self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
++ if varCoordNorm < FSITolerance:
++ FSIConv = True
++ break
+
+- # --- Relaxe the solid position --- #
++          # --- Relax the solid position --- #
+ self.MPIPrint('\nProcessing interface displacements...\n')
+- self.relaxSolidPosition(FSI_config)
+-
+- self.FSIIter += 1
+- # --- End OF FSI loop --- #
++ self.relaxSolidPosition(FSI_config)
++
++ self.FSIIter += 1
++ # --- End OF FSI loop --- #
+
+ self.MPIBarrier()
+
+- # --- Update the FSI history file --- #
+- if TimeIter > TimeIterTreshold:
+- self.MPIPrint('\nBGS is converged (strong coupling)')
+- self.writeFSIHistory(TimeIter, time, varCoordNorm, FSIConv)
+-
+- # --- Update, monitor and output the fluid solution before the next time step ---#
+- FluidSolver.Update()
+- FluidSolver.Monitor(TimeIter)
+- FluidSolver.Output(TimeIter)
+-
+- if TimeIter >= TimeIterTreshold:
+- if myid in self.solidSolverProcessors:
+- # --- Output the solid solution before thr next time step --- #
+- SolidSolver.writeSolution(time, self.FSIIter, TimeIter, NbTimeIter)
+-
+- # --- Displacement predictor for the next time step and update of the solid solution --- #
+- self.MPIPrint('\nSolid displacement prediction for next time step')
+- self.displacementPredictor(FSI_config, SolidSolver, deltaT)
++ # --- Update the FSI history file --- #
++ if TimeIter > TimeIterTreshold:
++ self.MPIPrint('\nBGS is converged (strong coupling)')
++ self.writeFSIHistory(TimeIter, time, varCoordNorm, FSIConv)
++
++ # --- Update, monitor and output the fluid solution before the next time step ---#
++ FluidSolver.Update()
++ FluidSolver.Monitor(TimeIter)
++ FluidSolver.Output(TimeIter)
++
++ if TimeIter >= TimeIterTreshold:
++ if myid in self.solidSolverProcessors:
++            # --- Output the solid solution before the next time step --- #
++ SolidSolver.writeSolution(time, self.FSIIter, TimeIter, NbTimeIter)
++
++ # --- Displacement predictor for the next time step and update of the solid solution --- #
++ self.MPIPrint('\nSolid displacement prediction for next time step')
++ self.displacementPredictor(FSI_config, SolidSolver, deltaT)
+ if myid in self.solidSolverProcessors:
+- SolidSolver.updateSolution()
+-
+- TimeIter += 1
+- time += deltaT
+- #--- End of the temporal loop --- #
++ SolidSolver.updateSolution()
++
++ TimeIter += 1
++ time += deltaT
++ #--- End of the temporal loop --- #
+
+ self.MPIBarrier()
+
+- self.MPIPrint('\n*************************')
+- self.MPIPrint('* End FSI computation *')
+- self.MPIPrint('*************************\n')
++ self.MPIPrint('\n*************************')
++ self.MPIPrint('* End FSI computation *')
++ self.MPIPrint('*************************\n')
+
+ def SteadyFSI(self, FSI_config,FluidSolver, SolidSolver):
+- """
+- Runs the steady FSI computation by synchronizing the fluid and solid solver with data exchange at the f/s interface.
+- """
++ """
++ Runs the steady FSI computation by synchronizing the fluid and solid solver with data exchange at the f/s interface.
++        Runs the steady FSI computation by synchronizing the fluid and solid solvers with data exchange at the f/s interface.
+
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
+- numberPart = self.comm.Get_size()
++ myid = self.comm.Get_rank()
++ numberPart = self.comm.Get_size()
+ else:
+ myid = 0
+ numberPart = 1
+
+- # --- Set some general variables for the steady computation --- #
+- NbIter = FSI_config['NB_EXT_ITER'] # number of fluid iteration at each FSI step
+- NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
+- FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
+- varCoordNorm = 0.0
+-
+- self.MPIPrint('\n********************************')
+- self.MPIPrint('* Begin steady FSI computation *')
+- self.MPIPrint('********************************\n')
+- self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI ***************')
++ # --- Set some general variables for the steady computation --- #
++ NbIter = FSI_config['NB_EXT_ITER'] # number of fluid iteration at each FSI step
++ NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
++ FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
++ varCoordNorm = 0.0
++
++ self.MPIPrint('\n********************************')
++ self.MPIPrint('* Begin steady FSI computation *')
++ self.MPIPrint('********************************\n')
++ self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI ***************')
+
+ self.getSolidInterfaceDisplacement(SolidSolver)
+
+- # --- External FSI loop --- #
+- self.FSIIter = 0
+- while self.FSIIter < NbFSIIterMax:
+- self.MPIPrint("\n>>>> FSI iteration {} <<<<".format(self.FSIIter))
+- self.MPIPrint('\nLaunching fluid solver for a steady computation...')
+- # --- Fluid solver call for FSI subiteration ---#
+- Iter = 0
+- FluidSolver.ResetConvergence()
+- while Iter < NbIter:
+- FluidSolver.PreprocessExtIter(Iter)
+- FluidSolver.Run()
+- StopIntegration = FluidSolver.Monitor(Iter)
+- FluidSolver.Output(Iter)
+- if StopIntegration:
+- break;
+- Iter += 1
+-
+- # --- Surface fluid loads interpolation and communication ---#
+- self.MPIPrint('\nProcessing interface fluid loads...\n')
++ # --- External FSI loop --- #
++ self.FSIIter = 0
++ while self.FSIIter < NbFSIIterMax:
++ self.MPIPrint("\n>>>> FSI iteration {} <<<<".format(self.FSIIter))
++ self.MPIPrint('\nLaunching fluid solver for a steady computation...')
++ # --- Fluid solver call for FSI subiteration ---#
++ Iter = 0
++ FluidSolver.ResetConvergence()
++ while Iter < NbIter:
++ FluidSolver.PreprocessExtIter(Iter)
++ FluidSolver.Run()
++ StopIntegration = FluidSolver.Monitor(Iter)
++ FluidSolver.Output(Iter)
++ if StopIntegration:
++            break
++ Iter += 1
++
++ # --- Surface fluid loads interpolation and communication ---#
++ self.MPIPrint('\nProcessing interface fluid loads...\n')
+ self.MPIBarrier()
+- self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
++ self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
+ self.MPIBarrier()
+- self.interpolateFluidLoadsOnSolidMesh(FSI_config)
+- self.setSolidInterfaceLoads(SolidSolver, FSI_config, 0.05)
+-
+- # --- Solid solver call for FSI subiteration --- #
+- self.MPIPrint('\nLaunching solid solver for a static computation...\n')
++ self.interpolateFluidLoadsOnSolidMesh(FSI_config)
++ self.setSolidInterfaceLoads(SolidSolver, FSI_config, 0.05)
++
++ # --- Solid solver call for FSI subiteration --- #
++ self.MPIPrint('\nLaunching solid solver for a static computation...\n')
+ if myid in self.solidSolverProcessors:
+- if FSI_config['CSD_SOLVER'] == 'NATIVE':
+- SolidSolver.staticComputation()
++ if FSI_config['CSD_SOLVER'] == 'NATIVE':
++ SolidSolver.staticComputation()
+ else:
+ SolidSolver.run(0.0, 0.05)
+- SolidSolver.writeSolution(0.0, self.FSIIter, Iter, NbIter)
++ SolidSolver.writeSolution(0.0, self.FSIIter, Iter, NbIter)
+
+- # --- Compute and monitor the FSI residual --- #
+- varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
+- self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
++ # --- Compute and monitor the FSI residual --- #
++ varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
++ self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
+ self.writeFSIHistory(0, 0.0, varCoordNorm, False)
+- if varCoordNorm < FSITolerance:
+- break
++ if varCoordNorm < FSITolerance:
++ break
+
+ # --- Relaxe the solid displacement and update the solid solution --- #
+ self.MPIPrint('\nProcessing interface displacements...\n')
+- self.relaxSolidPosition(FSI_config)
++ self.relaxSolidPosition(FSI_config)
+ if myid in self.solidSolverProcessors:
+ SolidSolver.updateSolution()
+-
+- # --- Mesh morphing step (displacement interpolation, displacements communication, and mesh morpher call) --- #
+- self.interpolateSolidPositionOnFluidMesh(FSI_config)
+- self.MPIPrint('\nPerforming static mesh deformation...\n')
+- self.setFluidInterfaceVarCoord(FluidSolver)
+- FluidSolver.StaticMeshUpdate()
+- self.FSIIter += 1
++
++ # --- Mesh morphing step (displacement interpolation, displacements communication, and mesh morpher call) --- #
++ self.interpolateSolidPositionOnFluidMesh(FSI_config)
++ self.MPIPrint('\nPerforming static mesh deformation...\n')
++ self.setFluidInterfaceVarCoord(FluidSolver)
++ FluidSolver.StaticMeshUpdate()
++ self.FSIIter += 1
+
+ self.MPIBarrier()
+
+- self.MPIPrint('\nBGS is converged (strong coupling)')
+- self.MPIPrint(' ')
+- self.MPIPrint('*************************')
+- self.MPIPrint('* End FSI computation *')
+- self.MPIPrint('*************************')
+- self.MPIPrint(' ')
++ self.MPIPrint('\nBGS is converged (strong coupling)')
++ self.MPIPrint(' ')
++ self.MPIPrint('*************************')
++ self.MPIPrint('* End FSI computation *')
++ self.MPIPrint('*************************')
++ self.MPIPrint(' ')
+diff -Naur old/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py new/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py
+--- old/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py 2020-05-10 16:17:07.000000000 +0300
+@@ -174,9 +174,9 @@
+
+ with open(self.Config_file) as configfile:
+ while 1:
+- line = configfile.readline()
+- if not line:
+- break
++ line = configfile.readline()
++ if not line:
++ break
+
+ # remove line returns
+ line = line.strip('\r\n')
+@@ -189,41 +189,41 @@
+ this_value = line[1].strip()
+
+ for case in switch(this_param):
+- #integer values
+- #if case("NB_FSI_ITER") :
+- #self.Config[this_param] = int(this_value)
+- #break
+-
+- #float values
+- if case("DELTA_T") : pass
+- if case("START_TIME") : pass
+- if case("STOP_TIME") : pass
+- if case("SPRING_MASS") : pass
+- if case("INERTIA_FLEXURAL") : pass
+- if case("SPRING_STIFFNESS") : pass
+- if case("SPRING_DAMPING") : pass
+- if case("TORSIONAL_STIFFNESS") : pass
+- if case("TORSIONAL_DAMPING") : pass
+- if case("CORD") : pass
+- if case("FLEXURAL_AXIS") : pass
+- if case("GRAVITY_CENTER") : pass
+- if case("INITIAL_DISP") : pass
+- if case("INITIAL_ANGLE") : pass
+- if case("RHO") :
+- self.Config[this_param] = float(this_value)
+- break
+-
+- #string values
+- if case("TIME_MARCHING") : pass
+- if case("MESH_FILE") : pass
+- if case("CSD_SOLVER") : pass
+- if case("MOVING_MARKER") : pass
+- if case("STRUCT_TYPE") :
+- self.Config[this_param] = this_value
+- break
++ #integer values
++ #if case("NB_FSI_ITER") :
++ #self.Config[this_param] = int(this_value)
++ #break
++
++ #float values
++ if case("DELTA_T") : pass
++ if case("START_TIME") : pass
++ if case("STOP_TIME") : pass
++ if case("SPRING_MASS") : pass
++ if case("INERTIA_FLEXURAL") : pass
++ if case("SPRING_STIFFNESS") : pass
++ if case("SPRING_DAMPING") : pass
++ if case("TORSIONAL_STIFFNESS") : pass
++ if case("TORSIONAL_DAMPING") : pass
++ if case("CORD") : pass
++ if case("FLEXURAL_AXIS") : pass
++ if case("GRAVITY_CENTER") : pass
++ if case("INITIAL_DISP") : pass
++ if case("INITIAL_ANGLE") : pass
++ if case("RHO") :
++ self.Config[this_param] = float(this_value)
++ break
++
++ #string values
++ if case("TIME_MARCHING") : pass
++ if case("MESH_FILE") : pass
++ if case("CSD_SOLVER") : pass
++ if case("MOVING_MARKER") : pass
++ if case("STRUCT_TYPE") :
++ self.Config[this_param] = this_value
++ break
+
+- if case():
+- print(this_param + " is an invalid option !")
++ if case():
++ print(this_param + " is an invalid option !")
+ break
+
+ def __readSU2Mesh(self):
+@@ -233,78 +233,78 @@
+ print('Opened mesh file ' + self.Mesh_file + '.')
+ while 1:
+ line = meshfile.readline()
+- if not line:
+- break
++ if not line:
++ break
+
+- pos = line.find('NDIM')
+- if pos != -1:
+- line = line.strip('\r\n')
++ pos = line.find('NDIM')
++ if pos != -1:
++ line = line.strip('\r\n')
+ line = line.split("=",1)
+- self.nDim = int(line[1])
+- continue
+-
+- pos = line.find('NELEM')
+- if pos != -1:
+- line = line.strip('\r\n')
++ self.nDim = int(line[1])
++ continue
++
++ pos = line.find('NELEM')
++ if pos != -1:
++ line = line.strip('\r\n')
+ line = line.split("=",1)
+- self.nElem = int(line[1])
+- continue
++ self.nElem = int(line[1])
++ continue
+
+- pos = line.find('NPOIN')
+- if pos != -1:
+- line = line.strip('\r\n')
++ pos = line.find('NPOIN')
++ if pos != -1:
++ line = line.strip('\r\n')
+ line = line.split("=",1)
+- self.nPoint = int(line[1])
++ self.nPoint = int(line[1])
+ for iPoint in range(self.nPoint):
+- self.node.append(Point())
+- line = meshfile.readline()
+- line = line.strip('\r\n')
+- line = line.split(' ',self.nDim)
+- x = float(line[0])
+- y = float(line[1])
++ self.node.append(Point())
++ line = meshfile.readline()
++ line = line.strip('\r\n')
++ line = line.split(' ',self.nDim)
++ x = float(line[0])
++ y = float(line[1])
+ z = 0.0
+- if self.nDim == 3:
+- z = float(line[2])
+- self.node[iPoint].SetCoord((x,y,z))
++ if self.nDim == 3:
++ z = float(line[2])
++ self.node[iPoint].SetCoord((x,y,z))
+ self.node[iPoint].SetCoord0((x,y,z))
+- self.node[iPoint].SetCoord_n((x,y,z))
+- continue
++ self.node[iPoint].SetCoord_n((x,y,z))
++ continue
+
+- pos = line.find('NMARK')
+- if pos != -1:
+- line = line.strip('\r\n')
++ pos = line.find('NMARK')
++ if pos != -1:
++ line = line.strip('\r\n')
+ line = line.split("=",1)
+- self.nMarker = int(line[1])
+- continue
++ self.nMarker = int(line[1])
++ continue
+
+- pos = line.find('MARKER_TAG')
+- if pos != -1:
+- line = line.strip('\r\n')
+- line = line.replace(" ", "")
++ pos = line.find('MARKER_TAG')
++ if pos != -1:
++ line = line.strip('\r\n')
++ line = line.replace(" ", "")
+ line = line.split("=",1)
+- markerTag = line[1]
+- if markerTag == self.FSI_marker:
+- self.markers[markerTag] = []
+- line = meshfile.readline()
+- line = line.strip('\r\n')
+- line = line.split("=",1)
+- nElem = int(line[1])
+- for iElem in range(nElem):
+- line = meshfile.readline()
+- line = line.strip('\r\n')
+- line = line.split(' ',1)
+- elemType = int(line[0])
+- if elemType == 3:
+- nodes = line[1].split(' ', 1)
+- if not int(nodes[0]) in self.markers[markerTag]:
+- self.markers[markerTag].append(int(nodes[0]))
+- if not int(nodes[1]) in self.markers[markerTag]:
+- self.markers[markerTag].append(int(nodes[1]))
+- else:
+- print("Element type {} is not recognized !!".format(elemType))
+- continue
+- else:
+- continue
++ markerTag = line[1]
++ if markerTag == self.FSI_marker:
++ self.markers[markerTag] = []
++ line = meshfile.readline()
++ line = line.strip('\r\n')
++ line = line.split("=",1)
++ nElem = int(line[1])
++ for iElem in range(nElem):
++ line = meshfile.readline()
++ line = line.strip('\r\n')
++ line = line.split(' ',1)
++ elemType = int(line[0])
++ if elemType == 3:
++ nodes = line[1].split(' ', 1)
++ if not int(nodes[0]) in self.markers[markerTag]:
++ self.markers[markerTag].append(int(nodes[0]))
++ if not int(nodes[1]) in self.markers[markerTag]:
++ self.markers[markerTag].append(int(nodes[1]))
++ else:
++ print("Element type {} is not recognized !!".format(elemType))
++ continue
++ else:
++ continue
+
+ print("Number of dimensions: {}".format(self.nDim))
+ print("Number of elements: {}".format(self.nElem))
+@@ -441,23 +441,23 @@
+ Coord_n = self.node[iPoint].GetCoord_n()
+
+ if self.Unsteady:
+- r = Coord_n - self.centerOfRotation_n
+- else:
+- r = Coord - self.centerOfRotation
++ r = Coord_n - self.centerOfRotation_n
++ else:
++ r = Coord - self.centerOfRotation
+
+- rotCoord = rotMatrix.dot(r)
++ rotCoord = rotMatrix.dot(r)
+
+ newCoord = newCenter + rotCoord
+ newVel[0] = Centerdot[0]+psidot*(newCoord[1]-newCenter[1])
+- newVel[1] = Centerdot[1]-psidot*(newCoord[0]-newCenter[0])
+- newVel[2] = Centerdot[2]+0.0
++ newVel[1] = Centerdot[1]-psidot*(newCoord[0]-newCenter[0])
++ newVel[2] = Centerdot[2]+0.0
+
+ self.node[iPoint].SetCoord((newCoord[0], newCoord[1], newCoord[2]))
+ self.node[iPoint].SetVel((newVel[0], newVel[1], newVel[2]))
+
+- if initialize:
+- self.node[iPoint].SetCoord_n((newCoord[0], newCoord[1], newCoord[2]))
+- self.node[iPoint].SetVel_n((newVel[0], newVel[1], newVel[2]))
++ if initialize:
++ self.node[iPoint].SetCoord_n((newCoord[0], newCoord[1], newCoord[2]))
++ self.node[iPoint].SetVel_n((newVel[0], newVel[1], newVel[2]))
+
+ self.centerOfRotation = np.copy(newCenter)
+
+diff -Naur old/SU2_PY/FSI/io/FSI_config.py new/SU2_PY/FSI/io/FSI_config.py
+--- old/SU2_PY/FSI/io/FSI_config.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/FSI/io/FSI_config.py 2020-05-10 16:17:07.000000000 +0300
+@@ -58,23 +58,23 @@
+ self.readConfig()
+
+ def __str__(self):
+- tempString = str()
+- for key, value in self._ConfigContent.items():
+- tempString += "{} = {}\n".format(key,value)
+- return tempString
++ tempString = str()
++ for key, value in self._ConfigContent.items():
++ tempString += "{} = {}\n".format(key,value)
++ return tempString
+
+ def __getitem__(self,key):
+- return self._ConfigContent[key]
++ return self._ConfigContent[key]
+
+ def __setitem__(self, key, value):
+- self._ConfigContent[key] = value
++ self._ConfigContent[key] = value
+
+ def readConfig(self):
+ input_file = open(self.ConfigFileName)
+ while 1:
+- line = input_file.readline()
+- if not line:
+- break
++ line = input_file.readline()
++ if not line:
++ break
+ # remove line returns
+ line = line.strip('\r\n')
+ # make sure it has useful data
+@@ -86,46 +86,46 @@
+ this_value = line[1].strip()
+
+ for case in switch(this_param):
+- #integer values
+- if case("NDIM") : pass
+- #if case("MESH_DEF_LIN_ITER") : pass
+- #if case("MESH_DEF_NONLIN_ITER") : pass
+- if case("RESTART_ITER") : pass
+- if case("NB_EXT_ITER") : pass
+- if case("NB_FSI_ITER") :
+- self._ConfigContent[this_param] = int(this_value)
+- break
++ #integer values
++ if case("NDIM") : pass
++ #if case("MESH_DEF_LIN_ITER") : pass
++ #if case("MESH_DEF_NONLIN_ITER") : pass
++ if case("RESTART_ITER") : pass
++ if case("NB_EXT_ITER") : pass
++ if case("NB_FSI_ITER") :
++ self._ConfigContent[this_param] = int(this_value)
++ break
+
+- #float values
++ #float values
+ if case("RBF_RADIUS") : pass
+- if case("AITKEN_PARAM") : pass
+- if case("START_TIME") : pass
+- if case("UNST_TIMESTEP") : pass
+- if case("UNST_TIME") : pass
+- if case("FSI_TOLERANCE") :
+- self._ConfigContent[this_param] = float(this_value)
+- break
+-
+- #string values
+- if case("CFD_CONFIG_FILE_NAME") : pass
+- if case("CSD_SOLVER") : pass
+- if case("CSD_CONFIG_FILE_NAME") : pass
+- if case("RESTART_SOL") : pass
+- if case("MATCHING_MESH") : pass
++ if case("AITKEN_PARAM") : pass
++ if case("START_TIME") : pass
++ if case("UNST_TIMESTEP") : pass
++ if case("UNST_TIME") : pass
++ if case("FSI_TOLERANCE") :
++ self._ConfigContent[this_param] = float(this_value)
++ break
++
++ #string values
++ if case("CFD_CONFIG_FILE_NAME") : pass
++ if case("CSD_SOLVER") : pass
++ if case("CSD_CONFIG_FILE_NAME") : pass
++ if case("RESTART_SOL") : pass
++ if case("MATCHING_MESH") : pass
+ if case("MESH_INTERP_METHOD") : pass
+- if case("DISP_PRED") : pass
+- if case("AITKEN_RELAX") : pass
+- if case("TIME_MARCHING") : pass
+- if case("INTERNAL_FLOW") :
+- #if case("MESH_DEF_METHOD") : pass
+- self._ConfigContent[this_param] = this_value
+- break
+-
+- if case():
+- print(this_param + " is an invalid option !")
+- break
+- #end for
+-
++ if case("DISP_PRED") : pass
++ if case("AITKEN_RELAX") : pass
++ if case("TIME_MARCHING") : pass
++ if case("INTERNAL_FLOW") :
++ #if case("MESH_DEF_METHOD") : pass
++ self._ConfigContent[this_param] = this_value
++ break
++
++ if case():
++ print(this_param + " is an invalid option !")
++ break
++ #end for
++
+
+
+ #def dump()
+diff -Naur old/SU2_PY/SU2/util/filter_adjoint.py new/SU2_PY/SU2/util/filter_adjoint.py
+--- old/SU2_PY/SU2/util/filter_adjoint.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/SU2/util/filter_adjoint.py 2020-05-10 16:17:07.000000000 +0300
+@@ -179,7 +179,7 @@
+ Sens_smoother = smooth( S_clip, Sens_smooth, smth_len , 'blackman' )
+ Sens_filter = Sens_smooth + (Sens_smooth - Sens_smoother) # sharpener
+ else:
+- raise Exception, 'unknown filter type'
++ raise Exception('unknown filter type')
+
+ # --------------------------------------------
+ # PLOTTING
+@@ -472,10 +472,10 @@
+ """
+
+ if x.ndim != 1:
+- raise ValueError, "smooth only accepts 1 dimension arrays."
++ raise ValueError("smooth only accepts 1 dimension arrays.")
+
+ if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
+- raise ValueError, "Window is not of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
++ raise ValueError("Window is not of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
+
+ # interpolate to constant time sample width
+ min_dt = np.min( np.diff(t) )
+diff -Naur old/SU2_PY/compute_uncertainty.py new/SU2_PY/compute_uncertainty.py
+--- old/SU2_PY/compute_uncertainty.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/compute_uncertainty.py 2020-05-10 16:17:07.000000000 +0300
+@@ -66,13 +66,13 @@
+
+ # perform eigenvalue perturbations
+ for comp in range(1,4):
+- print "\n\n =================== Performing " + str(comp) + " Component Perturbation =================== \n\n"
++ print('\n\n =================== Performing ' + str(comp) + ' Component Perturbation =================== \n\n')
+
+ # make copies
+ konfig = copy.deepcopy(config)
+ ztate = copy.deepcopy(state)
+
+- # set componentality
++ # set componentality
+ konfig.UQ_COMPONENT = comp
+
+ # send output to a folder
+@@ -85,14 +85,14 @@
+ # run su2
+ info = SU2.run.CFD(konfig)
+ ztate.update(info)
+-
+- # Solution merging
+- konfig.SOLUTION_FILENAME = konfig.RESTART_FILENAME
+- info = SU2.run.merge(konfig)
+- ztate.update(info)
++
++ # Solution merging
++ konfig.SOLUTION_FILENAME = konfig.RESTART_FILENAME
++ info = SU2.run.merge(konfig)
++ ztate.update(info)
+
+
+- print "\n\n =================== Performing p1c1 Component Perturbation =================== \n\n"
++ print('\n\n =================== Performing p1c1 Component Perturbation =================== \n\n')
+
+ # make copies
+ konfig = copy.deepcopy(config)
+@@ -118,7 +118,7 @@
+ info = SU2.run.merge(konfig)
+ state.update(info)
+
+- print "\n\n =================== Performing p1c2 Component Perturbation =================== \n\n"
++ print('\n\n =================== Performing p1c2 Component Perturbation =================== \n\n')
+
+ # make copies
+ konfig = copy.deepcopy(config)
+diff -Naur old/SU2_PY/fsi_computation.py new/SU2_PY/fsi_computation.py
+--- old/SU2_PY/fsi_computation.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/fsi_computation.py 2020-05-10 16:17:07.000000000 +0300
+@@ -74,9 +74,9 @@
+ if myid == rootProcess:
+ if os.getcwd() not in sys.path:
+ sys.path.append(os.getcwd())
+- print("Setting working directory : {}".format(os.getcwd()))
+- else:
+- print("Working directory is set to {}".format(os.getcwd()))
++ print("Setting working directory : {}".format(os.getcwd()))
++ else:
++ print("Working directory is set to {}".format(os.getcwd()))
+
+ # starts timer
+ start = timer.time()
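
Most of the hunks above are whitespace-only: the affected SU2 scripts mixed tabs
and spaces in their indentation, which Python 3 rejects outright (TabError), so
entire blocks are reindented. The remaining hunks rewrite Python 2-only syntax,
chiefly the print statement and the comma form of raise. A minimal illustrative
sketch (not taken from the SU2 sources) of both constructs and their Python 3
replacements:

    # Python 2 forms removed by the patch (syntax errors under Python 3):
    #     print "Window is not supported"
    #     raise ValueError, "smooth only accepts 1 dimension arrays."

    def check_window(window):
        # Python 3: raise takes a call, print is a function.
        if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
            raise ValueError("Window '{}' is not supported".format(window))
        print('Using window: {}'.format(window))

    check_window('blackman')

Both replacement forms are also valid Python 2, so the patched scripts remain
usable with either interpreter version.
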
diff --git a/sci-physics/SU2/files/SU2-7.0.4-unbundle_boost.patch b/sci-physics/SU2/files/SU2-7.0.4-unbundle_boost.patch
new file mode 100644
index 000000000..12acdfe78
--- /dev/null
+++ b/sci-physics/SU2/files/SU2-7.0.4-unbundle_boost.patch
@@ -0,0 +1,31 @@
+diff -Naur old_static/externals/tecio/meson.build new_shared/externals/tecio/meson.build
+--- old_static/externals/tecio/meson.build 2020-05-09 16:35:10.000000000 +0300
++++ new_shared/externals/tecio/meson.build 2020-05-10 11:52:36.000000000 +0300
+@@ -1,15 +1,15 @@
+-check_dir = run_command(python,
+- script_path / 'check_dir.py',
+- 'boost')
+-if check_dir.returncode() != 0
+- message('Extracting boost ...')
+- extract_boost = run_command(python,
+- script_path / 'extract_file.py',
+- 'boost.tar.gz',
+- meson.current_source_dir(), check: true)
+-else
+- message('Boost sources found.')
+-endif
++#check_dir = run_command(python,
++# script_path / 'check_dir.py',
++# 'boost')
++#if check_dir.returncode() != 0
++# message('Extracting boost ...')
++# extract_boost = run_command(python,
++# script_path / 'extract_file.py',
++# 'boost.tar.gz',
++# meson.current_source_dir(), check: true)
++#else
++# message('Boost sources found.')
++#endif
+
+ if mpi
+ subdir('teciompisrc')
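
With the extraction step commented out, the tecio build no longer unpacks the
bundled boost.tar.gz and must instead see Boost headers from the system
(presumably satisfied by a dev-libs/boost dependency in the ebuild). A
hypothetical Meson fragment (an assumption, not part of this patch) that would
make the requirement explicit:

    # Hypothetical: resolve system Boost instead of the bundled copy.
    boost_dep = dependency('boost', required : true)
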
diff --git a/sci-physics/SU2/metadata.xml b/sci-physics/SU2/metadata.xml
new file mode 100644
index 000000000..636aded5f
--- /dev/null
+++ b/sci-physics/SU2/metadata.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
+<pkgmetadata>
+ <maintainer type="person">
+ <email>torokhov-s-a@yandex.ru</email>
+ <name>Sergey Torokhov</name>
+ </maintainer>
+ <use>
+ <flag name="cgns">Build with CGNS support (bundled)</flag>
+ <flag name="mkl">Enable Intel MKL support</flag>
+ <flag name="openblas">Enable OpenBLAS support</flag>
+ <flag name="tecio">Enable TECIO support</flag>
+		<flag name="tutorials">Install the Tutorials files</flag>
+ </use>
+ <longdescription>
+	The SU2 suite is an open-source collection of C++-based software tools
+ for performing Partial Differential Equation (PDE) analysis and solving
+ PDE-constrained optimization problems.
+
+ The toolset is designed with Computational Fluid Dynamics (CFD)
+ and aerodynamic shape optimization in mind, but is extensible
+ to treat arbitrary sets of governing equations such as potential flow,
+ elasticity, electrodynamics, chemically-reacting flows, and many others.
+ </longdescription>
+ <upstream>
+ <remote-id type="github">su2code/SU2</remote-id>
+ </upstream>
+</pkgmetadata>
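
For reference, the USE flags declared above translate into the usual Gentoo
workflow; the flag selection below is illustrative, not a recommendation:

    # /etc/portage/package.use/su2
    sci-physics/SU2 cgns openblas tecio tutorials

    # then build and install the package
    emerge --ask sci-physics/SU2
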