Mesh conversion (XDMF) in an MPI environment

FEniCS version 2019.1.0 (Docker), Linux Lubuntu 64-bit

Dear all.

I am trying to import several geometries (.msh files) into FEniCS and, to speed up the calculation, I would like to perform the import in parallel with the help of MPI. When I execute the example code below with …

  • python3 XDMF_convert_v01.py or
  • mpirun -n 1 python3 XDMF_convert_v01.py

… everything is fine! However, as soon as I use more than 1 core of the processor, the code hangs. The culprit is basically this …

mesh = Mesh()

… and what follows it (marked in the code).

Question 1: What do I have to do so that this works with more than 1 core?
Related question 2: In fact, instead of the mpirun command above, I have to use mpirun.mpich! Otherwise the whole program is simply executed n times! Do you have an idea why that is?

Thanks in advance for some hints!


Here is the link to the meshes, and here is the code (save it as XDMF_convert_v01.py):

import os
import numpy as np
import meshio
from fenics import *
from mpi4py import MPI

# _________________________________________________________________ Definitions

def XDMF_convert(file_list, dir_data):

    for i, file_path in enumerate(file_list):

        # Read the mesh file.
        mesh = meshio.read(file_path)

        tetra_cells    = None
        tetra_data     = None
        triangle_cells = None
        triangle_data  = None

        for key in mesh.cell_data_dict["gmsh:physical"].keys():
            if key == "triangle":
                triangle_data = mesh.cell_data_dict["gmsh:physical"][key]
            elif key == "tetra":
                tetra_data = mesh.cell_data_dict["gmsh:physical"][key]

        for cell in mesh.cells:
            if cell.type == "tetra":
                if tetra_cells is None:
                    tetra_cells = cell.data
                else:
                    tetra_cells = np.vstack([tetra_cells, cell.data])
            elif cell.type == "triangle":
                if triangle_cells is None:
                    triangle_cells = cell.data
                else:
                    triangle_cells = np.vstack([triangle_cells, cell.data])

        tetra_mesh = meshio.Mesh(points=mesh.points,
                                 cells={"tetra": tetra_cells},
                                 cell_data={"name_to_read":[tetra_data]})

        triangle_mesh = meshio.Mesh(points=mesh.points,
                                    cells=[("triangle", triangle_cells)],
                                    cell_data={"name_to_read":[triangle_data]})

        file_tetra = os.path.join(dir_data, "Data_"+str(i)+"_tetra_mesh.xdmf")
        file_trian = os.path.join(dir_data, "Data_"+str(i)+"_mf.xdmf")
        meshio.write(file_tetra, tetra_mesh)
        meshio.write(file_trian, triangle_mesh)

        # If the following block is **excluded**, the program runs to completion,
        # even with 'mpirun -n x python3 XDMF_convert.py' and x > 1!
        # With the block included, the code hangs on the Mesh() call below.
        #"""
        mesh = Mesh()
        with XDMFFile(file_tetra) as infile:
            infile.read(mesh)

        mvc = MeshValueCollection("size_t", mesh, 2)
        with XDMFFile(file_trian) as infile:
            infile.read(mvc, "name_to_read")
        boundaries = cpp.mesh.MeshFunctionSizet(mesh, mvc)

        mvc2 = MeshValueCollection("size_t", mesh, 3)
        with XDMFFile(file_tetra) as infile:
            infile.read(mvc2, "name_to_read")
        subdomains = cpp.mesh.MeshFunctionSizet(mesh, mvc2)
        #"""


# ________________________________________________________________________ Main

if __name__ == '__main__':

    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()

    # Put all files into this sub-directory
    dir_data = "./results"

    # _____________________________________________ Only process No. 0
    if rank == 0:

        if not os.path.isdir(dir_data):
            os.mkdir(dir_data)

        list_files_all = ["./Data_0000.msh",
                         "./Data_0001.msh",
                         "./Data_0002.msh",
                         "./Data_0003.msh",
                         "./Data_0004.msh"]

        # Distribute the files into a list that contains <size> sub-lists,
        # one per process.
        list_files = []
        for j in range(size):
            list_files.append([])
        for k, para in enumerate(list_files_all):
            list_files[k % size].append(para)

        print('Files:', list_files)
        print("")
    else:
        list_files = None

    # __________________________________________________ All processes
    files = comm.scatter(list_files, root=0)
    print('Process number:', rank,
          '-- The process is calculating with the following files:')
    print(files)
    print("")
    result = XDMF_convert(files, dir_data)

I couldn’t go to sleep without having solved this issue, especially in view of my new and fast computer (16x faster than the one I had before), which I bought very recently … . :sunglasses:

I found a solution myself. One has to tell the system that one wants the “one file per process” mode, read here: for the I/O one has to use MPI.comm_self.
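
Concretely, the key change with respect to the first version is the following (just a minimal sketch; the full corrected script is further below):

# Before: Mesh() and XDMFFile() default to MPI_COMM_WORLD, so the read is
# collective over all processes -- it hangs when every rank opens a different file.
#   mesh = Mesh()
#   with XDMFFile(file_tetra) as infile:
#       infile.read(mesh)

# After: each process reads "its" file on its own communicator.
mesh = Mesh(MPI.comm_self)
with XDMFFile(MPI.comm_self, file_tetra) as infile:
    infile.read(mesh)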

Here is the running code. By the way, there was also a small mistake in the indexing of the output files, which I have fixed. I now use the FEniCS wrapper for MPI. Please let me know if you agree with all this, thanks.

import os
import numpy as np
import meshio
from fenics import *

# _________________________________________________________________ Definitions

def XDMF_convert(comm, rank, file_list, dir_data):

    for i, file_path in enumerate(file_list):

        # Read the mesh file.
        mesh = meshio.read(file_path)
        # Extract the file index (the digit right before '.msh') so that the
        # output file names do not collide between processes.
        pos = file_path.rfind('.msh')
        k = file_path[pos-1:pos]
        print('    ', rank, file_path, k)

        tetra_cells    = None
        tetra_data     = None
        triangle_cells = None
        triangle_data  = None

        for key in mesh.cell_data_dict["gmsh:physical"].keys():
            if key == "triangle":
                triangle_data = mesh.cell_data_dict["gmsh:physical"][key]
            elif key == "tetra":
                tetra_data = mesh.cell_data_dict["gmsh:physical"][key]

        for cell in mesh.cells:
            if cell.type == "tetra":
                if tetra_cells is None:
                    tetra_cells = cell.data
                else:
                    tetra_cells = np.vstack([tetra_cells, cell.data])
            elif cell.type == "triangle":
                if triangle_cells is None:
                    triangle_cells = cell.data
                else:
                    triangle_cells = np.vstack([triangle_cells, cell.data])

        tetra_mesh = meshio.Mesh(points=mesh.points,
                                 cells={"tetra": tetra_cells},
                                 cell_data={"name_to_read":[tetra_data]})

        triangle_mesh = meshio.Mesh(points=mesh.points,
                                    cells=[("triangle", triangle_cells)],
                                    cell_data={"name_to_read":[triangle_data]})

        file_tetra = os.path.join(dir_data, "Data_"+str(k)+"_tetra_mesh.xdmf")
        file_trian = os.path.join(dir_data, "Data_"+str(k)+"_mf.xdmf")
        meshio.write(file_tetra, tetra_mesh)
        meshio.write(file_trian, triangle_mesh)

        mesh = Mesh(MPI.comm_self)
        with XDMFFile(MPI.comm_self, file_tetra) as infile:
            infile.read(mesh)

        mvc = MeshValueCollection("size_t", mesh, 2)
        with XDMFFile(MPI.comm_self, file_trian) as infile:
            infile.read(mvc, "name_to_read")
        boundaries = cpp.mesh.MeshFunctionSizet(mesh, mvc)

        mvc2 = MeshValueCollection("size_t", mesh, 3)
        with XDMFFile(MPI.comm_self, file_tetra) as infile:
            infile.read(mvc2, "name_to_read")
        subdomains = cpp.mesh.MeshFunctionSizet(mesh, mvc2)

# ________________________________________________________________________ Main

if __name__ == '__main__':

    comm = MPI.comm_world
    size = comm.Get_size()
    rank = comm.Get_rank()

    # Put all files into this sub-directory
    dir_data = "./results"

    # _____________________________________________ Only process No. 0
    if rank == 0:

        if not os.path.isdir(dir_data):
            os.mkdir(dir_data)

        list_files_all = ["./Data_0000.msh",
                         "./Data_0001.msh",
                         "./Data_0002.msh",
                         "./Data_0003.msh",
                         "./Data_0004.msh"]

        # Distribute the files into a list that contains <size> sub-lists,
        # one per process.
        list_files = []
        for j in range(size):
            list_files.append([])
        for k, para in enumerate(list_files_all):
            list_files[k % size].append(para)

        print('    Files:', list_files)
        print("")
    else:
        list_files = None

    # __________________________________________________ All processes
    files = comm.scatter(list_files, root=0)
    print('    Process number:', rank, '-- Files:', files)
    result = XDMF_convert(comm, rank, files, dir_data)
    print("")

At a quick glance, it looks like you’re reading and writing with meshio on all processes. As far as I’m aware, meshio is not designed for parallel computation. Perhaps you should do that step on rank 0 only and read in the XDMF files using dolfin in parallel.

I’m not entirely sure what you’re trying to achieve. If you just want to process lots of meshes on each MPI process separately, you should map your function to a single-process split of COMM_WORLD.
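
For example, something along these lines (just an untested sketch with an illustrative file name):

from dolfin import Mesh, XDMFFile
from mpi4py import MPI

comm = MPI.COMM_WORLD
# one single-process communicator per rank
local_comm = comm.Split(color=comm.rank, key=0)

# each rank then reads its own (already converted) mesh independently
mesh = Mesh(local_comm)
with XDMFFile(local_comm, "Data_0_tetra_mesh.xdmf") as infile:
    infile.read(mesh)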


Good morning nate,

thanks for your comments.

Well, I have a capacitor for which I change the distance between the two electrodes (e.g. 100 distance values). At each distance, I calculate the electric field and other quantities.

Yes, true. However, each process gets a different file, and my understanding is that each process then simply does its own job (reading its .msh file and converting it to XDMF).

Yep, thanks. I will do this in case it does not work.

Aha, okay. Do you have a link to some examples or some code? I would be very grateful. Do you also have an idea why I have to use mpirun.mpich instead of mpirun (otherwise several copies of the program are executed)?

Dear all.

During the last two months I have written a fairly large FEniCS program, and last week I started to implement parallel computing, in particular with MPI. This took a long time because I had to search for a long time for explanations, solutions, etc. In the end, however, I got what I wanted. :sunglasses:

To share my experience with others, I attach a small example at the end of this message (FEniCS_mesh_solve_parallel_v01.py). It illustrates how one can use FEniCS with MPI. The example describes a sphere-plane capacitor stored in 5 .msh files. The sphere-plane distance changes from file to file, from 1 to 5 rel. units (could be nm). Such mesh files can be obtained as I have described here.

The objective of the code is to use 4 processes to calculate the electrostatic force, so that essentially each process gets its own file(s). This can be done with:

mpirun.mpich -n 4 python3 FEniCS_mesh_solve_parallel_v01.py

Note that I have to use mpirun.mpich! The plain mpirun command does not work well in my case (FEniCS version 2019.1.0 (Docker), Linux Lubuntu 64-bit): several independent instances of the code are executed, which does not make sense.
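
A quick way to check which MPI library mpi4py is actually built against, and whether the mpirun launcher matches it (just a diagnostic sketch), is:

from mpi4py import MPI
print(MPI.Get_library_version())   # which MPI implementation mpi4py was built against
print(MPI.COMM_WORLD.Get_size())   # prints 1 on every process if the mpirun
                                   # launcher and this library do not match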

With respect to MPI, I make use of MPI scatter and MPI gather, which work very well in general. The scatter distributes the calculation work over the processes, whereas the gather collects all the results. The tricky part to find on the Internet was the FEniCS-specific stuff :smiley:. In the code I mark the places where one has to pay attention; it is in particular MPI.comm_self, which made my life quite difficult over the last few days.
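
Stripped down to the bare pattern, the scatter/gather part looks like this (plain mpi4py here; the full script below uses dolfin's MPI.comm_world, which in 2019.1.0 is an mpi4py communicator anyway):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

if rank == 0:
    # one sub-list of work items per process
    chunks = [list(range(i, 10, size)) for i in range(size)]
else:
    chunks = None

my_chunk = comm.scatter(chunks, root=0)        # each rank gets its own sub-list
my_results = [x * x for x in my_chunk]         # stand-in for the FEniCS calculation
all_results = comm.gather(my_results, root=0)  # rank 0 collects everything

if rank == 0:
    print(all_results)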

Note that this is my way of doing things, and it might not be the best one. :stuck_out_tongue_winking_eye: However, in my cases the principle of the code works. Let me know what you think, thanks.

Here is the link to the mesh files.

# ##### BEGIN GPL LICENSE BLOCK #####
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software Foundation,
#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
#
#  This is a demo of how one can use FEniCS and MPI. In particular, I use
#  MPI scatter and MPI gather, and also MPI.comm_world/MPI.comm_self.
#
#  Note that this code is my own solution and may well not be the smartest
#  one. In my case it works. ;-) Feel free to comment/criticize, etc.
#
#
#  The 5 mesh files show a sphere above a plane, which together form a
#  capacitor. From file 0 to 4, the distance is increased (from 1 to 5). In
#  the mesh, identifier 1 is the sphere, identifier 2 the plane and
#  identifier 3 the outer boundary of the spherical domain.
#
#  Dr. Clemens Barth
#
#  Last change: 2020-06-16
#
#

import os
import numpy as np
import scipy.constants as const
import meshio
from fenics import *

# _________________________________________________________________ Definitions


# Convert the mesh (.msh) into the XDMF format.
def XDMF_convert(rank, file_path, dir_data):

    #print('    XDMF_convert:', rank, file_path)
    mesh = meshio.read(file_path)
    pos = file_path.rfind('.msh')
    k = file_path[pos-1:pos]

    tetra_cells    = None
    tetra_data     = None
    triangle_cells = None
    triangle_data  = None

    for key in mesh.cell_data_dict["gmsh:physical"].keys():
        if key == "triangle":
            triangle_data = mesh.cell_data_dict["gmsh:physical"][key]
        elif key == "tetra":
            tetra_data = mesh.cell_data_dict["gmsh:physical"][key]

    for cell in mesh.cells:
        if cell.type == "tetra":
            if tetra_cells is None:
                tetra_cells = cell.data
            else:
                tetra_cells = np.vstack([tetra_cells, cell.data])
        elif cell.type == "triangle":
            if triangle_cells is None:
                triangle_cells = cell.data
            else:
                triangle_cells = np.vstack([triangle_cells, cell.data])

    tetra_mesh = meshio.Mesh(points=mesh.points,
                             cells={"tetra": tetra_cells},
                             cell_data={"name_to_read":[tetra_data]})

    triangle_mesh = meshio.Mesh(points=mesh.points,
                                cells=[("triangle", triangle_cells)],
                                cell_data={"name_to_read":[triangle_data]})

    file_tetra = os.path.join(dir_data, "Data_"+str(k)+"_tetra_mesh.xdmf")
    file_trian = os.path.join(dir_data, "Data_"+str(k)+"_mf.xdmf")
    meshio.write(file_tetra, tetra_mesh)
    meshio.write(file_trian, triangle_mesh)


# Load the mesh (XDMF) into FEniCS.
def load_into_fenics(rank, file_path, dir_data):

    #print('    load_into_fenics:', rank, file_path)
    pos = file_path.rfind('.msh')
    k = file_path[pos-1:pos]

    file_tetra = os.path.join(dir_data, "Data_"+str(k)+"_tetra_mesh.xdmf")
    file_trian = os.path.join(dir_data, "Data_"+str(k)+"_mf.xdmf")

    #
    # Important: note the 'MPI.comm_self' in the following lines!
    #
    mesh = Mesh(MPI.comm_self)
    with XDMFFile(MPI.comm_self, file_tetra) as infile:
        infile.read(mesh)

    mvc = MeshValueCollection("size_t", mesh, 2)
    with XDMFFile(MPI.comm_self, file_trian) as infile:
        infile.read(mvc, "name_to_read")
    boundaries = cpp.mesh.MeshFunctionSizet(mesh, mvc)

    mvc2 = MeshValueCollection("size_t", mesh, 3)
    with XDMFFile(MPI.comm_self, file_tetra) as infile:
        infile.read(mvc2, "name_to_read")
    subdomains = cpp.mesh.MeshFunctionSizet(mesh, mvc2)

    return mesh, boundaries, subdomains


# The calculation of the electric potential and field + force.
def do_calculation(rank, mesh, subdomains, boundaries, file_path, dir_data):

    dx = Measure("dx", domain=mesh, subdomain_data=subdomains)
    ds = Measure("ds", domain=mesh, subdomain_data=boundaries)

    pos = file_path.rfind('.msh')
    k = file_path[pos-1:pos]

    #print('    do_calculation:', rank)
    V  = FunctionSpace(mesh, "P", 2)

    list_b_cond = []
    # For simplicity, the voltage on the sphere is simply the file number k
    # (0 to 4). ;-) The plane is kept at 0 V.
    list_b_cond.append(DirichletBC(V, Constant(float(k)), boundaries, 1))  # Sphere
    list_b_cond.append(DirichletBC(V, Constant(0.0), boundaries, 2))       # Plane

    # Here we apply a Robin boundary condition on the large outer boundary:
    # the potential should decay smoothly towards that boundary. This is also
    # done to demonstrate the use of an 'Expression'.
    #
    # Important: note the 'MPI.comm_self' in the Expression!
    #
    r   = Expression("sqrt(" \
                     "pow(x[0]-rx, 2) + " \
                     "pow(x[1]-ry, 2) + " \
                     "pow(x[2]-rz, 2))", degree=1,
                     rx=0.0, ry=0.0, rz=0.0, mpi_comm=MPI.comm_self)
    p   = 1 / r
    q   = Constant(0.0)
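    # The Robin condition du/dn + p*(u - q) = 0 on boundary 3 contributes the
    # term p*u*v*ds(3) to the bilinear form and p*q*v*ds(3) to the right-hand side.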

    u = TrialFunction(V)
    v = TestFunction(V)

    a = dot(grad(u), grad(v)) * dx + p * u * v * ds(3)
    L = Constant(0.0) * v * dx + p * q * v * ds(3)

    u = Function(V)
    solve(a == L, u, list_b_cond,
        solver_parameters = {"linear_solver" : "gmres",
                            "preconditioner" : "amg"})

    # We save now the electrostatic potential and field.
    #
    # Important: note the 'MPI.comm_self'!
    #
    potentialFile = File(MPI.comm_self, os.path.join(dir_data, "V_" + str(k) + ".pvd"))
    potentialFile << u

    E_field = -grad(u)
    E_field_p = project(E_field, VectorFunctionSpace(mesh,'P',1), solver_type="mumps",
                        form_compiler_parameters={"cpp_optimize": True,
                                                "representation": "uflacs",
                                                "quadrature_degree": 2} )
    e_fieldfile = File(MPI.comm_self, os.path.join(dir_data, "E_" + str(k) + ".pvd"))
    e_fieldfile << E_field_p

    # We calculate the force:
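    # z-component of the electrostatic force on the sphere (boundary 1) via the
    # Maxwell stress tensor: F_z = (eps_0 / 2) * integral over ds(1) of |E|^2 * n_z.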
    n  = FacetNormal(mesh)
    force = assemble(dot(E_field, E_field)*n[2]*ds(1)) * (const.epsilon_0/2.0)

    # As a result, we return the force.
    return force



# ________________________________________________________________________ Main

if __name__ == '__main__':

    comm = MPI.comm_world
    self = MPI.comm_self
    size = comm.Get_size()
    rank = comm.Get_rank()

    if size > 5:
        print("    To many processes asked, max. 5 are allowed. Exit!")
        exit(-1)

    # Put all files into this sub-directory
    dir_data = "./results"

    # _______________________________ Only process No. 0 is doing the following
    if rank == 0:

        print("")
        if not os.path.isdir(dir_data):
            os.mkdir(dir_data)

        list_files_all = ["./Data_0000.msh", "./Data_0001.msh",
                          "./Data_0002.msh", "./Data_0003.msh",
                          "./Data_0004.msh"]

        # Convert the .msh files into the XDMF format. Note that this is only
        # done by process No. 0!
        for file_path in list_files_all:
            XDMF_convert(rank, file_path, dir_data)

        # Distribute the files into a list that contains <size> sub-lists, one
        # per process. <size> is the number of processes requested when
        # executing mpirun, e.g.
        # mpirun.mpich -n 4 python3 FEniCS_mesh_solve_parallel_v01.py => size = 4
        list_files = []
        for j in range(size):
            list_files.append([])
        for k, para in enumerate(list_files_all):
            list_files[k % size].append(para)

        for i, files in enumerate(list_files):
            print("    File list (", files, ") prepared by process 0 for process: ", i)
        print("\n\n\n")
    else:
        list_files = None

    # _____________________________________________ This concerns all processes


    # Process 0 scatters the list; equally important to note:
    # each process receives its own individual sub-list 'list_files_proc'!
    list_files_proc = comm.scatter(list_files, root=0)

    # Each process is doing the following with its individual list
    # 'list_files_proc':
    list_results = []
    for file_path in list_files_proc:
        # 1. load the mesh and ...
        mesh, bound, subd = load_into_fenics(rank, file_path, dir_data)
        # ... calculate
        result = do_calculation(rank, mesh, subd, bound, file_path, dir_data)

        # Put the results into a list.
        list_results.append(result)

    # Process No. 0 is now gathering the results from all processes, including
    # the result from itself.
    list_results_all = comm.gather(list_results, root=0)

    # __________________________________ Only process No. 0 is doing the output
    if rank == 0:
        print("    Results (forces):", list_results_all)
        print("\n")