Gather solutions in parallel in FEniCSX

The two separate results you get above are the local data (you can find their ordering by communicating the local range, as done in the code below).

I'm not sure why you would want to order the data as in serial, since the ordering of degrees of freedom differs (due to mesh partitioning). However, you can obtain your desired array on a single process with:

import dolfinx
import numpy
import ufl
from mpi4py import MPI

# MPI setup: every rank participates in building the distributed mesh below.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()


# Distributed (partitioned) 3x2 quadrilateral unit-square mesh with a
# first-order continuous Lagrange space on top of it.
mesh = dolfinx.UnitSquareMesh(
    comm, 3, 2, dolfinx.cpp.mesh.CellType.quadrilateral)
V = dolfinx.FunctionSpace(mesh, ("CG", 1))


def func(x):
    """Linear reference field f(x, y) = x + 3*y used for interpolation."""
    return 3 * x[1] + x[0]


if rank == 0:
    # Build a reference solution on a *serial* mesh (MPI.COMM_SELF), so its
    # dof ordering is the plain single-process ordering we want to map to.
    # Mesh on one proc
    mesh0 = dolfinx.UnitSquareMesh(
        MPI.COMM_SELF, 3, 2, dolfinx.cpp.mesh.CellType.quadrilateral)
    V0 = dolfinx.FunctionSpace(mesh0, ("CG", 1))
    # Dof coordinates of the serial space; used below to match each serial
    # dof to its counterpart in the gathered parallel data.
    x0 = V0.tabulate_dof_coordinates()
    u0 = dolfinx.Function(V0)
    u0.interpolate(func)
    # Reference solution
    print(f"Serial interpolation {u0.vector.array}")

# Parallel interpolation
u = dolfinx.Function(V)
u.interpolate(func)
# Update ghost entries so every process holds current values
# (legacy DOLFINx API; newer releases spell this u.x.scatter_forward()).
dolfinx.cpp.la.scatter_forward(u.x)

# Get local ranges and global size of array
imap = u.function_space.dofmap.index_map
# Scale the index-map ranges by the block size to get ranges into the
# flattened dof array (block size is 1 here for a scalar CG1 space).
local_range = numpy.asarray(imap.local_range, dtype=numpy.int32) * \
    u.function_space.dofmap.index_map_bs
size_global = imap.size_global * u.function_space.dofmap.index_map_bs

# Communicate ranges and local data
ranges = comm.gather(local_range, root=0)
data = comm.gather(u.vector.array, root=0)

# Communicate local dof coordinates
# NOTE(review): this slice uses size_local (dof count) while the ranges above
# are scaled by the block size — these agree only when bs == 1; verify before
# reusing this for vector-valued spaces.
x = V.tabulate_dof_coordinates()[:imap.size_local]
x_glob = comm.gather(x, root=0)

if comm.rank == 0:
    # Assemble the full dof vector in global (parallel) ordering from the
    # per-process chunks received above.
    global_array = numpy.zeros(size_global)
    for rng, vals in zip(ranges, data):
        global_array[rng[0]:rng[1]] = vals

    # Assemble the matching dof coordinates in the same global ordering.
    global_x = numpy.zeros((size_global, 3))
    for rng, coords in zip(ranges, x_glob):
        global_x[rng[0]:rng[1], :] = coords

    # For each serial dof, find the global dof whose coordinates are closest
    # (nearest neighbour under the L1 norm; an exact match is expected).
    serial_to_global = [
        numpy.abs(global_x - coord).sum(axis=1).argmin() for coord in x0
    ]

    # Reorder the gathered values into the serial dof ordering.
    u_from_global = numpy.zeros(global_array.shape)
    u_from_global[:len(serial_to_global)] = global_array[serial_to_global]
    print(f"Gathered from parallel: {u_from_global}")
    assert numpy.allclose(u_from_global, u0.vector.array)

But note that the serial mesh/function space has to be defined in order to obtain that mapping.

2 Likes