Hi all, I’m running a dolfinx simulation in parallel (MPI), and I want to map the values of a function (PHI) computed on a distributed mesh onto a serial copy of the same mesh (e.g., loaded from an XDMF file on rank 0). I’m not sure how to do this properly, especially when it comes to matching degrees of freedom between the distributed and serial meshes.
Here is a minimal example:
import dolfinx
import numpy as np
import ufl
from dolfinx.fem.petsc import LinearProblem
from petsc4py import PETSc
from dolfinx import mesh, fem, io
import mpi4py.MPI as MPI
# 1. Parallel mesh and function
mesh_parallel = mesh.create_unit_cube(MPI.COMM_WORLD, 10, 10, 10)
V_parallel = fem.functionspace(mesh_parallel, ("Lagrange", 1))  # first-order Lagrange elements only
PHI = fem.Function(V_parallel)
v = ufl.TestFunction(V_parallel)
u = ufl.TrialFunction(V_parallel)
rhs = fem.form(ufl.inner(ufl.grad(v), ufl.as_vector([1, 0, 0])) * ufl.dx)
a = fem.form(ufl.inner(ufl.grad(v), ufl.grad(u)) * ufl.dx)
A = fem.petsc.assemble_matrix(a)
A.assemble()
b = fem.petsc.create_vector(rhs)
fem.petsc.assemble_vector(b, rhs)
solver = PETSc.KSP().create(mesh_parallel.comm)
solver.setOperators(A)
solver.solve(b, PHI.x.petsc_vec)
PHI.x.scatter_forward()
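# PHI now holds the distributed solution; scatter_forward updates the ghost
# entries of PHI.x.array from their owning ranks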
# Save mesh to reconstruct serial mesh later
with io.XDMFFile(mesh_parallel.comm, "save_mesh.xdmf", "w") as xdmf:
    xdmf.write_mesh(mesh_parallel)
# I have tried to follow the approach from https://fenicsproject.discourse.group/t/how-to-collect-the-global-mesh-without-writing-a-file/12445/4
dofmap = V_parallel.dofmap.list
root = 0
pci = mesh_parallel.topology.original_cell_index
ci = mesh_parallel.comm.gather(pci, root=root)
dofmaps = mesh_parallel.comm.gather(dofmap, root=root)
x = mesh_parallel.comm.gather(PHI.x.array, root=root)
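# At this point rank 0 holds, per rank: the cell-to-dof maps (dofmaps), the
# original (input) index of each local cell (ci), and the local dof values
# including ghosts (x)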
if MPI.COMM_WORLD.rank == root:
    with io.XDMFFile(MPI.COMM_SELF, "save_mesh.xdmf", "r") as xdmf:
        serial_mesh = xdmf.read_mesh()
    inverse_map = np.argsort(serial_mesh.topology.original_cell_index)
    Vs = dolfinx.fem.functionspace(serial_mesh, ("Lagrange", 1))
    us = dolfinx.fem.Function(Vs)
    PHI_Parallel = np.concatenate(x)
    PHI_Parallel_Total = np.sum(PHI_Parallel)
    sum_serial = 0
    dofmap_s = Vs.dofmap.list
    num_dofs = dofmap_s.shape[1]  # dofs per cell (4 for P1 on tetrahedra)
    for rank, (dofm, cell_indices, x_p) in enumerate(zip(dofmaps, ci, x)):
        print(f"DATA from rank {rank}:")
        for i, cell in enumerate(cell_indices):
            serial_cell = inverse_map[cell]
            serial_dofs = dofmap_s[serial_cell]
            parallel_dofs = dofm[i]
            for j in range(num_dofs):
                # print(x_p[parallel_dofs[j]])
                sum_serial += x_p[parallel_dofs[j]]
    print(sum_serial - PHI_Parallel_Total)  # Error: the difference is not zero
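    # I would have expected a direct transfer along these lines to work
    # (a sketch; it assumes the cell-local dof ordering of a given cell is
    # identical in the serial and distributed meshes, which I am not sure about):
    for dofm, cell_indices, x_p in zip(dofmaps, ci, x):
        for i, cell in enumerate(cell_indices):
            us.x.array[dofmap_s[inverse_map[cell]]] = x_p[dofm[i]]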
# I have also tried using the global input indices of the mesh nodes
dof_local = V_parallel.dofmap.index_map.size_local
num_owned_nodes = mesh_parallel.geometry.index_map().size_local
global_input_index = mesh_parallel.geometry.input_global_indices[:num_owned_nodes]
global_input_index_comm = mesh_parallel.comm.gather(global_input_index, root=0)
PHI_comm = mesh_parallel.comm.gather(PHI.x.array[:dof_local], root=0)
mesh_nodes = mesh_parallel.comm.gather(mesh_parallel.geometry.x, root=0)  # for order-1 elements; includes owned nodes + ghosts
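# Per rank this gathers: the input (file) global index of each owned node
# (global_input_index_comm), the owned dof values (PHI_comm) and the node
# coordinates including ghosts (mesh_nodes)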
if MPI.COMM_WORLD.rank == 0:
    with io.XDMFFile(MPI.COMM_SELF, "save_mesh.xdmf", "r") as xdmf:
        serial_mesh = xdmf.read_mesh()
    V_serial = fem.functionspace(serial_mesh, ("Lagrange", 1))
    PHI_serial = fem.Function(V_serial)
    print(mesh_nodes)
    for i in range(len(global_input_index_comm)):
        mesh_nodes[i][global_input_index_comm[i]]  # I get: IndexError: index 1188 is out of bounds for axis 0 with size 799
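As a possible fallback, I was thinking of matching dofs through their physical coordinates with tabulate_dof_coordinates() (a rough sketch below; it assumes first-order Lagrange, so every dof sits at a unique vertex coordinate, and the O(N^2) search is only meant for this small example):
num_owned = V_parallel.dofmap.index_map.size_local
coords_local = V_parallel.tabulate_dof_coordinates()[:num_owned]
coords_all = mesh_parallel.comm.gather(coords_local, root=0)
vals_all = mesh_parallel.comm.gather(PHI.x.array[:num_owned], root=0)
if MPI.COMM_WORLD.rank == 0:
    coords_all = np.concatenate(coords_all)
    vals_all = np.concatenate(vals_all)
    # For each serial dof, find the gathered parallel dof at the same point
    for d, xc in enumerate(V_serial.tabulate_dof_coordinates()):
        match = np.flatnonzero(np.isclose(coords_all, xc).all(axis=1))
        PHI_serial.x.array[d] = vals_all[match[0]]
Is something like this reasonable, or is there a recommended way to match dofs between a distributed and a serial mesh?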
Thanks