Hello Community,
I have a question about computing a homogenization framework in parallel. I would like to test different load cases in a for loop and compute the volume average of a certain quantity. I am already using MPI allreduce to sum the partial integrals over all processes. However, running the problem with different numbers of processes gives different results. In the example below the differences are small, but in my real-world problem, with a much larger and more complicated mesh, the differences are up to 40%.
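To be precise, the quantity I average is the heat flux from Fourier's law, i.e. for each direction $i$ I compute $\bar{q}_i = -\frac{1}{|\Omega|} \int_\Omega \kappa \, \partial_i T \,\mathrm{d}x$, where every process assembles its local part of the integral and the contributions are summed with allreduce (this is what Q_Fourier does in the MWE below).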
Number of processes: 1
Case 0: Average heat flow in x-direction: -1.51250491169694
Number of processes: 5
Case 0: Average heat flow in x-direction: -1.5125100523083435
Therefore, I would like to ask whether there is anything else I need to consider, such as updating ghost values after the interpolations inside the loop.
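For reference, this is roughly what I mean by updating ghost values, as a minimal sketch using the function names from my MWE below and assuming that scatter_forward on the underlying vector is the appropriate call:

# sketch only, not part of the MWE yet: update ghost dofs after each interpolation
T_ges.interpolate(T_ges_expr)
T_ges.x.scatter_forward()   # push owned dof values to the ghost entries on neighbouring processes
T_grad.interpolate(T_grad_expr)
T_grad.x.scatter_forward()

I am not sure whether such calls are actually needed before computing the averages, or whether something else causes the differences.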
Here is my MWE:
# Load required libraries
#########################
import gmsh
import dolfinx
from dolfinx import fem, io, mesh
from dolfinx.io.gmshio import model_to_mesh
import numpy as np
from mpi4py import MPI
import ufl
from petsc4py import PETSc
##############################################################################
# Generate mesh #
##############################################################################
gmsh.initialize()
comm = MPI.COMM_WORLD
rank = MPI.COMM_WORLD.rank
L = 1 # length of rectangle
f = 0.3 # volume fraction of inclusion
r = np.sqrt(f*L**2/np.pi) # radius of inclusion
gdim = 2 # dimension
if rank == 0:
    inclusion = gmsh.model.occ.addDisk(L / 2, L / 2, 0, r, r)
    gmsh.model.occ.synchronize()
    matrix = gmsh.model.occ.addRectangle(0, 0, 0, L, L)
    gmsh.model.occ.synchronize()
    whole_domain = gmsh.model.occ.fragment([(2, matrix)], [(2, inclusion)])
    gmsh.model.occ.synchronize()
    gmsh.model.addPhysicalGroup(gdim, [matrix], tag=1)
    gmsh.model.addPhysicalGroup(gdim, [inclusion], tag=2)
    background_surfaces = []
    inclusion_surfaces = []
    for domain in whole_domain[0]:
        com = gmsh.model.occ.getCenterOfMass(domain[0], domain[1])
        mass = gmsh.model.occ.getMass(domain[0], domain[1])
        if np.isclose(mass, np.pi * (r ** 2)):  # identify inclusion by its mass
            inclusion_surfaces.append(domain)
        else:
            background_surfaces.append(domain)
    gmsh.model.mesh.field.add("Distance", 1)
    edges = gmsh.model.getBoundary(inclusion_surfaces, oriented=False)
    gmsh.model.mesh.field.setNumbers(1, "EdgesList", [e[1] for e in edges])
    gmsh.model.mesh.field.add("Threshold", 2)
    gmsh.model.mesh.field.setNumber(2, "IField", 1)
    gmsh.model.mesh.field.setNumber(2, "LcMin", 0.01)
    gmsh.model.mesh.field.setNumber(2, "LcMax", 0.05)
    gmsh.model.mesh.field.setNumber(2, "DistMin", 0.025)
    gmsh.model.mesh.field.setNumber(2, "DistMax", 0.1)
    gmsh.model.mesh.field.setAsBackgroundMesh(2)
    gmsh.option.setNumber("Mesh.Algorithm", 6)
    gmsh.model.mesh.generate(gdim)
domain, ct, _ = model_to_mesh(gmsh.model, comm, 0, gdim=gdim)
gmsh.finalize()
with io.XDMFFile(domain.comm, "MeshWithGmsh_v2.xdmf", "w") as xdmf:
    xdmf.write_mesh(domain)
    xdmf.write_meshtags(ct)
dx = ufl.Measure('dx', domain=domain, subdomain_data=ct)
x_coor = ufl.SpatialCoordinate(domain) # coordinates
num_cells = domain.topology.index_map(domain.topology.dim).size_local  # number of locally owned cells on this process
##############################################################################
# Function space and elements #
##############################################################################
P1 = ufl.FiniteElement("CG", domain.ufl_cell(), 1)
V = fem.FunctionSpace(domain, P1)
### for linear problem:
T_fluc = ufl.TrialFunction(V) # fluctuation: primary variable linear problem
v = ufl.TestFunction(V) # testfunction
##############################################################################
# Dirichlet boundary conditions #
##############################################################################
bcs = []
for i in range(gdim):
    def dirichletboundary(x):
        return np.logical_or(np.isclose(x[i], domain.geometry.x[:, i].min()),
                             np.isclose(x[i], domain.geometry.x[:, i].max()))
    facets = mesh.locate_entities_boundary(domain, gdim - 1, dirichletboundary)
    topological_dofs = fem.locate_dofs_topological(V, gdim - 1, facets)
    bcs.append(fem.dirichletbc(PETSc.ScalarType(0.0), topological_dofs, V))
##############################################################################
# Definition of parameters #
##############################################################################
# total volume
V_tot = domain.comm.allreduce(fem.assemble_scalar(fem.form(1*dx)), op=MPI.SUM) # for parallel computing
# material parameters
S = fem.FunctionSpace(domain, ("DG", 0))
kappa = fem.Function(S)
for i in range(num_cells):
    if ct.values[i] == 1:  # matrix
        kappa.x.array[i] = 1
    elif ct.values[i] == 2:  # inclusions
        kappa.x.array[i] = 5
### prescribed Gradient \bar{\nabla T}
T_gradient = fem.Constant(domain, PETSc.ScalarType((0,0))) # updated in the for loop
# Fourier law
def Q_Fourier(V_tot, k, gradient):
    q = - k * gradient
    q_avg = np.zeros(gdim)
    for j in range(gdim):
        q_dir = 1 / V_tot * domain.comm.allreduce(fem.assemble_scalar(fem.form(q[j] * dx)), op=MPI.SUM)  # for parallel computing
        q_avg[j] = q_dir
    return q_avg
##############################################################################
# linear variational problem #
##############################################################################
eqn = kappa * ufl.dot(ufl.grad(ufl.dot(T_gradient , x_coor) + T_fluc), ufl.grad(v)) * dx
##############################################################################
# Solve the problem for various load cases #
##############################################################################
# initialize Functions for postprocessing
W = fem.VectorFunctionSpace(domain, ("DG", 0)) # discontinuous VectorFunctionSpace for gradient
T_ges = fem.Function(V)
T_grad = fem.Function(W)
T_fluctuation = fem.Function(V)
# initialize list for postprocessing
flux_avg_x, flux_avg_y = [], [] # average flux of heat
# linear solver
a_form = ufl.lhs(eqn)
L_form = ufl.rhs(eqn)
problem = dolfinx.fem.petsc.LinearProblem(a_form, L_form, bcs=bcs)
for (j, case) in enumerate(["T_x", "T_y"]):
    if j == 0:
        T_gradient.value = ((1, 0,))
    elif j == 1:
        T_gradient.value = ((0, 1,))
    T_fluc = problem.solve()
    # total temperature
    T_ges_expr = fem.Expression(ufl.dot(T_gradient, x_coor) + T_fluc, V.element.interpolation_points())
    T_ges.interpolate(T_ges_expr)
    # temperature gradient
    T_grad_expr = fem.Expression(ufl.grad(T_ges), W.element.interpolation_points())
    T_grad.interpolate(T_grad_expr)
    # compute heat flux
    q_avg = Q_Fourier(V_tot, kappa, T_grad)  # average heat flow; Fourier's law
    # append to lists
    flux_avg_x.append(q_avg[0])
    flux_avg_y.append(q_avg[1])
if rank == 0:
print("Number of processes: ", MPI.COMM_WORLD.size)
for (j, case) in enumerate(["T_x", "T_y"]):
print(f"Case {j}: Average heat flow in x-direction: ", flux_avg_x[j])
print(f"Case {j}: Average heat flow in y-direction: ", flux_avg_y[j])