FEniCS version 2019.1.0 (Docker), Linux Lubuntu 64-bit
Dear all.
I am trying to import several geometries (.msh files) into FEniCS, and to speed up the computation I would like to do the import in parallel with MPI. When I execute the example code below with …

python3 XDMF_convert_v01.py

or

mpirun -n 1 python3 XDMF_convert_v01.py

… everything works fine. However, as soon as I use more than one processor core, the code hangs. It hangs essentially at this line …
mesh = Mesh()
… and at the block that follows it (marked in the code).
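My guess is that Mesh() and the XDMFFile reader act collectively on MPI_COMM_WORLD by default, so every process ends up waiting for the others while reading its own file. Below is an untested sketch of what I mean by a purely process-local read; the helper name read_local is just made up by me, and I am not sure that passing MPI.comm_self like this is the intended way:

# Untested sketch: read each file only on the calling process by passing the
# self-communicator instead of the default world communicator.
from fenics import Mesh, XDMFFile, MeshValueCollection, MPI as fe_MPI

def read_local(file_tetra, file_trian):
    mesh = Mesh(fe_MPI.comm_self)                   # process-local mesh
    with XDMFFile(fe_MPI.comm_self, file_tetra) as infile:
        infile.read(mesh)
    mvc = MeshValueCollection("size_t", mesh, 2)    # facet markers
    with XDMFFile(fe_MPI.comm_self, file_trian) as infile:
        infile.read(mvc, "name_to_read")
    return mesh, mvc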
Question 1: What do I have to change so that this also works with more than one core?
Related question 2: Instead of the mpirun command above, I actually have to use mpirun.mpich. Otherwise the whole program is simply executed n times independently. Do you have an idea why that is?
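To narrow question 2 down, I put together the small sanity check below (just my own test, using only documented mpi4py calls). It prints which MPI library mpi4py is linked against and the communicator size each rank sees; if every rank reports size 1, then, as far as I understand it, the launcher and the MPI library behind mpi4py do not match, which would fit the observation that the whole program simply runs n times.

# Sanity check (my own test): which MPI library does mpi4py use, and how
# many ranks does each process actually see?
from mpi4py import MPI

comm = MPI.COMM_WORLD
print("rank", comm.Get_rank(), "of", comm.Get_size(),
      "--", MPI.Get_library_version().splitlines()[0])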
Thanks in advance for some hints!
Here is the link to the meshes, and here is the code (save it as XDMF_convert_v01.py):
import os
import numpy as np
import meshio
from fenics import *
from mpi4py import MPI
# _________________________________________________________________ Definitions
def XDMF_convert(file_list, dir_data):
    for i, file_path in enumerate(file_list):
        # Read the mesh file.
        mesh = meshio.read(file_path)
        tetra_cells = None
        tetra_data = None
        triangle_cells = None
        triangle_data = None
        # Extract the physical data of the surface (triangle) and volume
        # (tetra) entities.
        for key in mesh.cell_data_dict["gmsh:physical"].keys():
            if key == "triangle":
                triangle_data = mesh.cell_data_dict["gmsh:physical"][key]
            elif key == "tetra":
                tetra_data = mesh.cell_data_dict["gmsh:physical"][key]
        # Collect the connectivity of both cell types.
        for cell in mesh.cells:
            if cell.type == "tetra":
                if tetra_cells is None:
                    tetra_cells = cell.data
                else:
                    tetra_cells = np.vstack([tetra_cells, cell.data])
            elif cell.type == "triangle":
                if triangle_cells is None:
                    triangle_cells = cell.data
                else:
                    triangle_cells = np.vstack([triangle_cells, cell.data])
        tetra_mesh = meshio.Mesh(points=mesh.points,
                                 cells=[("tetra", tetra_cells)],
                                 cell_data={"name_to_read": [tetra_data]})
        triangle_mesh = meshio.Mesh(points=mesh.points,
                                    cells=[("triangle", triangle_cells)],
                                    cell_data={"name_to_read": [triangle_data]})
        file_tetra = os.path.join(dir_data, "Data_" + str(i) + "_tetra_mesh.xdmf")
        file_trian = os.path.join(dir_data, "Data_" + str(i) + "_mf.xdmf")
        meshio.write(file_tetra, tetra_mesh)
        meshio.write(file_trian, triangle_mesh)
        # If the following block is **excluded**, the program does come to an
        # end, even with 'mpirun -n x python3 XDMF_convert_v01.py' and x > 1!
        # The code basically hangs at the Mesh() command.
        #"""
        mesh = Mesh()
        with XDMFFile(file_tetra) as infile:
            infile.read(mesh)
        mvc = MeshValueCollection("size_t", mesh, 2)
        with XDMFFile(file_trian) as infile:
            infile.read(mvc, "name_to_read")
        boundaries = cpp.mesh.MeshFunctionSizet(mesh, mvc)
        mvc2 = MeshValueCollection("size_t", mesh, 3)
        with XDMFFile(file_tetra) as infile:
            infile.read(mvc2, "name_to_read")
        subdomains = cpp.mesh.MeshFunctionSizet(mesh, mvc2)
        #"""
# ________________________________________________________________________ Main
if __name__ == '__main__':
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()
    # Put all output files into this sub-directory.
    dir_data = "./results"
    # _____________________________________________ Only process No. 0
    if rank == 0:
        if not os.path.isdir(dir_data):
            os.mkdir(dir_data)
        list_files_all = ["./Data_0000.msh",
                          "./Data_0001.msh",
                          "./Data_0002.msh",
                          "./Data_0003.msh",
                          "./Data_0004.msh"]
        # Distribute the files round-robin over <size> sub-lists,
        # one sub-list per process.
        list_files = []
        for j in range(size):
            list_files.append([])
        for k, para in enumerate(list_files_all):
            list_files[k % size].append(para)
        print('Files:', list_files)
        print("")
    else:
        list_files = None
    # __________________________________________________ All processes
    files = comm.scatter(list_files, root=0)
    print('Process number:', rank,
          '-- The process is calculating with the following files:')
    print(files)
    print("")
    XDMF_convert(files, dir_data)