How to correctly map function values between a parallel and a serial mesh

Hi all, I’m working with a dolfinx simulation in parallel (MPI), and I want to map the values of a function (PHI) computed on a distributed mesh onto a serial version of the same mesh (e.g., loaded from an XDMF file on rank 0). I’m not sure how to do this properly, especially when it comes to matching degrees of freedom between the distributed and serial meshes.

Here is a minimal example:

import dolfinx
import numpy as np
import ufl
from dolfinx.fem.petsc import LinearProblem
from petsc4py import PETSc
from dolfinx import mesh, fem, io
import mpi4py.MPI as MPI

# 1. Parallel mesh and function
mesh_parallel = mesh.create_unit_cube(MPI.COMM_WORLD, 10, 10, 10)
V_parallel = fem.functionspace(mesh_parallel, ("Lagrange", 1)) # only elements of order 1

PHI = fem.Function(V_parallel)
v = ufl.TestFunction(V_parallel)
u = ufl.TrialFunction(V_parallel)

rhs = fem.form(ufl.inner(ufl.grad(v), ufl.as_vector([1, 0, 0])) * ufl.dx)
a = fem.form(ufl.inner(ufl.grad(v), ufl.grad(u)) * ufl.dx)
A = fem.petsc.assemble_matrix(a)
A.assemble()
b = fem.petsc.create_vector(rhs)
fem.petsc.assemble_vector(b, rhs)
# Accumulate contributions from ghost entries onto the owning ranks
b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE)

solver = PETSc.KSP().create(mesh_parallel.comm)
solver.setOperators(A)
solver.solve(b, PHI.x.petsc_vec)
PHI.x.scatter_forward()

# Save mesh to reconstruct serial mesh later
with io.XDMFFile(mesh_parallel.comm, "save_mesh.xdmf", "w") as xdmf:
    xdmf.write_mesh(mesh_parallel)

# I have tried to follow the post https://fenicsproject.discourse.group/t/how-to-collect-the-global-mesh-without-writing-a-file/12445/4


dofmap = V_parallel.dofmap.list

root = 0
pci = mesh_parallel.topology.original_cell_index
ci = mesh_parallel.comm.gather(pci, root=root)
dofmaps = mesh_parallel.comm.gather(dofmap, root=root)
x = mesh_parallel.comm.gather(PHI.x.array, root=root)

if MPI.COMM_WORLD.rank == root:
    with io.XDMFFile(MPI.COMM_SELF, "save_mesh.xdmf", "r") as xdmf:
        serial_mesh = xdmf.read_mesh()
    inverse_map = np.argsort(serial_mesh.topology.original_cell_index)
    Vs = dolfinx.fem.functionspace(serial_mesh, ("Lagrange", 1))
    us = dolfinx.fem.Function(Vs)

    PHI_Parallel = np.concatenate(x)

    PHI_Parallel_Total = np.sum(PHI_Parallel)

    sum_serial = 0
    
    dofmap_s = Vs.dofmap.list
    num_dofs = dofmap_s.shape[1]
    for rank, (dofm, cell_indices, x_p) in enumerate(zip(dofmaps, ci, x)):
        print(f"DATA from rank {rank}:")
        for i, cell in enumerate(cell_indices):
            serial_cell = inverse_map[cell]
            serial_dofs = dofmap_s[serial_cell]
            parallel_dofs = dofm[i]
            for j in range(num_dofs):
                #print(x_p[parallel_dofs[j]])
                sum_serial += x_p[parallel_dofs[j]]

    print(sum_serial - PHI_Parallel_Total)  # Error: this is not zero

# I have also tried to work with the global dof indices:

dof_local = V_parallel.dofmap.index_map.size_local 
num_owned_nodes = mesh_parallel.geometry.index_map().size_local
global_input_index = mesh_parallel.geometry.input_global_indices[:num_owned_nodes]
global_input_index_comm = mesh_parallel.comm.gather(global_input_index, root=0)
PHI_comm = mesh_parallel.comm.gather(PHI.x.array[:dof_local], root=0)
mesh_nodes = mesh_parallel.comm.gather(mesh_parallel.geometry.x, root=0)  # element order 1, so these are the mesh nodes (owned + ghosts)


if MPI.COMM_WORLD.rank == 0:
    with io.XDMFFile(MPI.COMM_SELF, "save_mesh.xdmf", "r") as xdmf:
        serial_mesh = xdmf.read_mesh()
    V_serial = fem.functionspace(serial_mesh, ("Lagrange", 1))
    PHI_serial = fem.Function(V_serial)
    print(mesh_nodes)
    for i in range(len(global_input_index_comm)):
        mesh_nodes[i][global_input_index_comm[i]]  # raises IndexError: index 1188 is out of bounds for axis 0 with size 799

Thanks

See for instance: Gather solutions in parallel in FEniCSX - #9 by dokken

Hi dokken,

I have tried incorporating what’s shown in “Gather solutions in parallel in FEniCSX” (see Part I of the code I posted), but it doesn’t work correctly: the sums of the parallel and serial results differ.

Here you are gathering both owned and ghosted entries, which would give different sums. See for instance: Parallel Computations with Dolfinx using MPI — MPI tutorial
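
For illustration, a minimal sketch of gathering only the owned entries, reusing the variable names from the code above (this only demonstrates the owned-vs-ghost point, not the full mapping onto the serial mesh):

imap = V_parallel.dofmap.index_map
bs = V_parallel.dofmap.index_map_bs  # 1 for this scalar Lagrange space
num_owned = imap.size_local * bs

owned_vals = PHI.x.array[:num_owned]
gathered = mesh_parallel.comm.gather(owned_vals, root=0)

if mesh_parallel.comm.rank == 0:
    PHI_global = np.concatenate(gathered)
    # Each dof is now counted exactly once, so this sum does not depend on
    # the number of MPI ranks.
    print(PHI_global.sum())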

Thanks, but I tried something similar in part II. For example, if I select the ghost DOFs using:

ghosts = V_parallel.dofmap.index_map.ghosts
ghosts = mesh_parallel.comm.gather(ghosts, root=root)

and remove the ghost DOFs from the mesh nodes (since the elements are of order 1):

mesh_gather = mesh_parallel.comm.gather(mesh_parallel.geometry.x, root=root)

if MPI.COMM_WORLD.rank == root:
    ghost_index = np.concatenate(ghosts)
    mesh_gather = np.concatenate(mesh_gather)
    mesh_gather = np.delete(mesh_gather, ghost_index, axis=0)

I find that mesh_gather ends up with the same number of nodes as the serial mesh, but it contains duplicate rows, which I assume is incorrect.
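
For completeness, here is a minimal sketch of one way to sidestep the ghost/duplicate bookkeeping for this scalar P1 case: gather only the owned dof values together with their dof coordinates, and match them to the dofs of the serial space by coordinate. The nearest-neighbour lookup assumes scipy is available; all other names are taken from the code above.

from scipy.spatial import cKDTree  # assumption: scipy is installed

imap = V_parallel.dofmap.index_map
num_owned = imap.size_local  # block size is 1 for this scalar Lagrange space

vals = mesh_parallel.comm.gather(PHI.x.array[:num_owned], root=0)
coords = mesh_parallel.comm.gather(V_parallel.tabulate_dof_coordinates()[:num_owned], root=0)

if mesh_parallel.comm.rank == 0:
    vals = np.concatenate(vals)
    coords = np.concatenate(coords)

    with io.XDMFFile(MPI.COMM_SELF, "save_mesh.xdmf", "r") as xdmf:
        serial_mesh = xdmf.read_mesh()
    V_serial = fem.functionspace(serial_mesh, ("Lagrange", 1))
    PHI_serial = fem.Function(V_serial)

    # For each dof of the serial space, find the gathered (owned) parallel dof
    # at the same physical coordinate and copy its value.
    tree = cKDTree(coords)
    dist, idx = tree.query(V_serial.tabulate_dof_coordinates())
    assert dist.max() < 1e-10, "every serial dof should coincide with a gathered dof"
    PHI_serial.x.array[:] = vals[idx]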