MPIRUN - writing out text in parallel

Hello all,

I have a (3,3) tensor variable for the stress, and I would like to write the nodal/cell values to a .txt file in parallel. I retrieve the values as:
localstress = stresses.vector().get_local()

However, when I write this to a filestream:
StrStrOutput = open("Stress.txt",'w')

multiple incomplete copies of the variable are written when running with mpirun. I tried guarding the write with

if MPI.COMM_WORLD.rank == 0:

However, this either slows down the entire solution process or makes it freeze.

Is there any built-in FEniCS function to do this in parallel, similar to the XDMF output?

You should use MPI to gather all the local data on a single process, and then write it to file if you want to save it to txt format.
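
For instance, a minimal sketch of that pattern with mpi4py could look like the following (assuming stresses is the projected stress Function from your code; the file name and the np.savetxt call are just for illustration, there is no built-in FEniCS routine for this):

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD

# Each process only owns part of the dof vector
localstress = stresses.vector().get_local()

# Collective calls such as gather must be made on every rank
# (guarding them with a rank check would deadlock); only rank 0 receives the data
gathered = comm.gather(localstress, root=0)

if comm.rank == 0:
    # gather returns a list with one array per process; stack it before writing.
    # The entries follow the parallel dof ordering, which is fine for e.g. averages.
    np.savetxt("Stress.txt", np.hstack(gathered))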


Hi Dokken,
Could you give me a complete example for my case?
I tried to follow this example, but I could not get it to work.

I wanted to get an average value of the stress and strain across the mesh.

The following is my attempt:

    localstrain = strainss.vector().get_local()
    localstress = stresses.vector().get_local()
    MPI.COMM_WORLD.barrier()
    localstrain = mesh.mpi_comm().allgather(localstrain)
    localstress = mesh.mpi_comm().allgather(localstress)
    MPI.COMM_WORLD.barrier()
    if MPI.COMM_WORLD.rank ==0:
        StrStrOutput.write("\n"+str(np.average(localstrain[0::9])/Volume)+"\t"+str(np.average(localstrain[4::9])/Volume)+"\t"+str(np.average(localstress[0::9])/Volume)+"\t"+str(np.average(localstress[4::9])/Volume))
        StrStrOutput.flush()

Please supply a minimal working code example, which:

  1. Illustrates what you want to achieve
  2. Produces either an error message or some behaviour that is unexpected (and then explain why it is unexpected)
  3. The code should be executable on any system that has dolfin installed. See Read before posting: How do I get my question answered? - #4 by nate for examples of questions that follow this structure.

Hi Dokken,

I apologize for not including a code sample. I would like to run the following uniaxial linear elasticity example in parallel using mpirun:

from ufl import shape
from ufl import indices
from dolfin import *
from mshr import *
import numpy as np
import os
import sys

# Scaled variables
L = 1
W = 1
mu = 1
rho = 1
delta = W/L
gamma = 0.4*delta**2
beta = 1.25
lambda_ = beta
g = gamma

T_Steps = 10
dt = 0.01
# Create mesh and define function space
mesh = BoxMesh(Point(0, 0, 0), Point(L, W, W), 6, 6, 6)
Volume = L*W*W
V = VectorFunctionSpace(mesh, 'CG', 2)
TensorDG0 = TensorFunctionSpace(mesh, 'DG', 0)

# Define boundary condition
tol = 1E-14

Str_Rate = 1.0

def clamped_boundary(x, on_boundary):
    return on_boundary and x[0] < tol

def allNodes(x, on_boundary):       return x[0] > -tol
def X0(x, on_boundary):             return near(x[0],0.0,DOLFIN_EPS)
def Y0(x, on_boundary):             return near(x[1],0.0,DOLFIN_EPS)
def Z0(x, on_boundary):             return near(x[2],0.0,DOLFIN_EPS)

t = 0.0

Displacements = Expression(('x[0]*StrRate*time'),degree = 0,  StrRate = Str_Rate,time = t)

bc1 = DirichletBC(V.sub(0), Displacements, allNodes,method='pointwise')
bc2 = DirichletBC(V.sub(1), Constant(0.), Y0)
bc3 = DirichletBC(V.sub(2), Constant(0.), Z0)

bc = [bc1,bc2,bc3]
# Define strain and stress

def epsilon(u):
    return sym(nabla_grad(u))

def sigma(u):
    return lambda_*div(u)*Identity(d) + 2*mu*epsilon(u)

# Define variational problem
u = TrialFunction(V)
d = u.geometric_dimension()  # space dimension
v = TestFunction(V)
f = Constant((0, 0, 0))
T = Constant((0, 0, 0))
a = inner(sigma(u), epsilon(v))*dx
L = dot(f, v)*dx + dot(T, v)*ds

# Compute solution
u = Function(V)

StrStrOutput = open("Output.txt",'w')
StrStrOutput.write("strainss[11] \t strainss[22] \t \
             stresses[11] \t stresses[22] ")

fileResults = XDMFFile('Solution/Output.xdmf')
fileResults.parameters["flush_output"] = True
fileResults.parameters["functions_share_mesh"] = True



for i in range(T_Steps):

    print("Timestep ",t)
    
    t+=dt
    
    Displacements.time = t

    solve(a == L, u, bc)



    stress = project(sigma(u),TensorDG0)
    strain = project(epsilon(u),TensorDG0)


    localstrain = strain.vector().get_local()
    localstress = stress.vector().get_local()


    StrStrOutput.write("\n"+str(localstrain[0::9].mean()/Volume)+"\t"+str(localstrain[4::9].mean()/Volume) \
                          +"\t"+str(localstress[0::9].mean()/Volume)+"\t"+str(localstress[4::9].mean()/Volume))

    StrStrOutput.flush()

    #u.rename("Displacement","label")
    #stress.rename("Stress","label")
    #strain.rename("Strain","label")
    #fileResults.write(u, t)
    #fileResults.write(stress, t)
    #fileResults.write(strain, t)

I tried to gather the data on the root process, but the values are inconsistent with the serial execution:

    localstrain = strainss.vector().get_local()
    localstress = stresses.vector().get_local()
    MPI.COMM_WORLD.barrier()
    localstrain = mesh.mpi_comm().allgather(localstrain)
    localstress = mesh.mpi_comm().allgather(localstress)
    MPI.COMM_WORLD.barrier()
    if MPI.COMM_WORLD.rank ==0:
        StrStrOutput.write("\n"+str(np.average(localstrain[0::9])/Volume)+"\t"+str(np.average(localstrain[4::9])/Volume)+"\t"+str(np.average(localstress[0::9])/Volume)+"\t"+str(np.average(localstress[4::9])/Volume))
        StrStrOutput.flush()

Thank you!

Dear @pgnikhil,

There are several things in your code that should be removed for it to be considered a minimal example: there are several unused variables and commented-out lines, and the time loop is not necessary to reproduce your issue.

To solve your problem, you need to realize that mesh.mpi_comm().allgather(...) returns a list of numpy arrays (one per process), not a single flat array. Thus, to get the actual average, you first need to stack them with np.hstack, as in the following:


from dolfin import *
import numpy as np

# Scaled variables
L = 1
W = 1
mu = 1
delta = W/L
beta = 1.25
lambda_ = beta

dt = 0.01

# Create mesh and define function space
mesh = BoxMesh(Point(0, 0, 0), Point(L, W, W), 2, 2, 2)
Volume = L*W*W
V = VectorFunctionSpace(mesh, 'CG', 2)
TensorDG0 = TensorFunctionSpace(mesh, 'DG', 0)

# Define boundary condition
tol = 1E-14

Str_Rate = 1.0

def clamped_boundary(x, on_boundary):
    return on_boundary and x[0] < tol

def allNodes(x, on_boundary):       return x[0] > -tol
def X0(x, on_boundary):             return near(x[0],0.0,DOLFIN_EPS)
def Y0(x, on_boundary):             return near(x[1],0.0,DOLFIN_EPS)
def Z0(x, on_boundary):             return near(x[2],0.0,DOLFIN_EPS)

t = 0.0

Displacements = Expression(('x[0]*StrRate*time'),degree = 0,  StrRate = Str_Rate,time = t)

bc1 = DirichletBC(V.sub(0), Displacements, allNodes,method='pointwise')
bc2 = DirichletBC(V.sub(1), Constant(0.), Y0)
bc3 = DirichletBC(V.sub(2), Constant(0.), Z0)

bc = [bc1,bc2,bc3]
# Define strain and stress

def epsilon(u):
    return sym(nabla_grad(u))

def sigma(u):
    return lambda_*div(u)*Identity(d) + 2*mu*epsilon(u)

# Define variational problem
u = TrialFunction(V)
d = u.geometric_dimension()  # space dimension
v = TestFunction(V)
f = Constant((0, 0, 0))
T = Constant((0, 0, 0))
a = inner(sigma(u), epsilon(v))*dx
L = dot(f, v)*dx + dot(T, v)*ds

# Compute solution
u = Function(V)

if MPI.comm_world.rank ==0:
    StrStrOutput = open(f"Output_{MPI.comm_world.size}.txt",'w')
    StrStrOutput.write("strainss[11] \t strainss[22] \t \
                stresses[11] \t stresses[22] ")

    
t+=dt
    
Displacements.time = t

solve(a == L, u, bc)
stress = project(sigma(u),TensorDG0)
strain = project(epsilon(u),TensorDG0)
localstrain = strain.vector().get_local()
localstress = stress.vector().get_local()

MPI.comm_world.barrier()
localstrain = np.hstack(mesh.mpi_comm().allgather(localstrain))
localstress = np.hstack(mesh.mpi_comm().allgather(localstress))
MPI.comm_world.barrier()
if MPI.comm_world.rank ==0:
    StrStrOutput.write("\n"+str(np.average(localstrain[0::9])/Volume)+"\t"+str(np.average(localstrain[4::9])/Volume)+"\t"+str(np.average(localstress[0::9])/Volume)+"\t"+str(np.average(localstress[4::9])/Volume))
    StrStrOutput.flush()
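
A side note on the design: since only averages end up in the file, you do not strictly have to gather the full dof vectors on one rank; reducing the local sums sends far less data. A minimal sketch of that alternative, reusing the variable names from the code above (purely illustrative, the gather version above works fine):

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD

# Local 11- and 22-components (every 9th entry of the DG0 tensor dof vector)
loc_eps = strain.vector().get_local()
loc_sig = stress.vector().get_local()

local_sums = np.array([loc_eps[0::9].sum(), loc_eps[4::9].sum(),
                       loc_sig[0::9].sum(), loc_sig[4::9].sum()])
local_counts = np.array([loc_eps[0::9].size, loc_eps[4::9].size,
                         loc_sig[0::9].size, loc_sig[4::9].size], dtype=float)

# Sum the contributions over all processes
global_sums = np.zeros_like(local_sums)
global_counts = np.zeros_like(local_counts)
comm.Allreduce(local_sums, global_sums, op=MPI.SUM)
comm.Allreduce(local_counts, global_counts, op=MPI.SUM)

if comm.rank == 0:
    averages = global_sums / global_counts / Volume
    StrStrOutput.write("\n" + "\t".join(str(a) for a in averages))
    StrStrOutput.flush()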

Hi Dokken,

I tested the code and it works well!
Thank you!