Parallel for-loop with multiple solve-calls, MPI

It still seems like you haven’t read the previous post I linked to. In your example you are creating the mesh with the global MPI communicator, which gives you a distributed mesh.
What you could do instead is use the MPI.comm_self communicator, so that each process solves its own problem on its own mesh. Then you can gather the local vectors (the arrays containing each solution) on the root process and post-process them.
See the example below:

from dolfin import *

comm = MPI.comm_world
size = comm.Get_size()
rank = comm.Get_rank()

def poisson(alpha):
    # Build the mesh with MPI.comm_self so each process holds the full (serial) problem
    mesh = UnitSquareMesh(MPI.comm_self, 10, 10)
    V = FunctionSpace(mesh, 'CG', 1)
    u, v = TrialFunction(V), TestFunction(V)
    bc = DirichletBC(V, Constant(0), 'on_boundary')
    a = inner(Constant(alpha)*grad(u), grad(v))*dx
    L = inner(Constant(1), v)*dx
    uh = Function(V)
    # Local array length equals the total number of dofs, since the mesh is not distributed
    print(len(uh.vector().get_local()))
    solve(a == L, uh, bc)
    return uh

alphas = [3, 8, 9, 10]
assert len(alphas) >= MPI.size(MPI.comm_world)

# Get alpha based on global rank
my_alpha = alphas[MPI.rank(MPI.comm_world)]
u_sol = poisson(my_alpha)

# Gather the local solution arrays on the root process.
# comm.gather returns a list with one array per rank on rank 0 (None on the other ranks),
# so no receive buffer has to be pre-allocated.
u_sol_gath = comm.gather(u_sol.vector().get_local(), root=0)

if rank == 0:
    print('Rank:', rank, ', number of gathered solutions:', len(u_sol_gath))
    # All meshes are identical, so one serial function space can hold every gathered array
    u_output = Function(u_sol.function_space())
    out = File(MPI.comm_self, "output.pvd")
    for i in range(size):
        u_output.vector()[:] = u_sol_gath[i]
        out << u_output
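
If you have more parameter values than MPI processes, each rank can instead loop over its own share of the list before gathering. Here is a minimal sketch of that pattern, reusing poisson, comm, rank and size from above; the round-robin split alphas[rank::size] and the names many_alphas, my_alphas, all_arrays are just illustrative choices, not anything prescribed by dolfin:

# Sketch: more parameters than processes (round-robin distribution)
many_alphas = [3, 8, 9, 10, 11, 12, 13]
my_alphas = many_alphas[rank::size]  # this rank's share of the parameters
my_arrays = [poisson(a).vector().get_local() for a in my_alphas]

# One list of local arrays per rank ends up on the root process
all_arrays = comm.gather(my_arrays, root=0)
if rank == 0:
    flat = [vec for per_rank in all_arrays for vec in per_rank]
    print('Collected', len(flat), 'solutions in total')

Either version is started with one MPI process per rank you want, e.g. mpirun -np 4 python3 script.py, where script.py stands for whatever name you saved the code under.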