Hello,
I would like to custom-partition my parallel mesh. Say I am working on the computational domain $[0, 2] \times [0, 2]$; with four processes I would like process 0 to own the unit square $[0, 1] \times [0, 1]$, process 1 to own $[1, 2] \times [0, 1]$, and so on.
I managed to do it for the case in which the mesh is created with `dolfinx.mesh.create_mesh`, because there I can control which cells are assigned to each process. See the code below.
```python
from mpi4py import MPI
import numpy as np
import ufl
import dolfinx

comm = MPI.COMM_WORLD

# First-order quadrilateral mesh in 2D
gdim = 2
shape = 'quadrilateral'
degree = 1
cell = ufl.Cell(shape, geometric_dimension=gdim)
domain = ufl.Mesh(ufl.VectorElement('Lagrange', cell, degree))

# One Lx x Ly square per process, arranged in a px x py grid of processes
Lx, Ly = 1, 1
px, py = 2, 2
assert comm.size == px * py

# N vertices per square per direction; interface vertices are shared, so counted once
N = 101
xspacing, yspacing = px * N - px + 1, py * N - py + 1

# Global vertex coordinates of the structured grid
X, Y = np.meshgrid(
    np.linspace(0, Lx * px, xspacing),
    np.linspace(0, Ly * py, yspacing),
)
X, Y = X.flatten(), Y.flatten()
coords = np.stack([X, Y]).transpose()

# Global connectivity: each quadrilateral is built from the index of its
# lower-left vertex (vertices in the last column cannot start a cell)
items = np.arange(xspacing * yspacing - xspacing - 1)
items = items[(items + 1) % xspacing != 0]
global_cells = np.array(
    np.vstack([[items, items + 1, items + xspacing, items + xspacing + 1]]),
    dtype=np.int32,
).transpose()

# Axis-aligned box [xmin, xmax] x [ymin, ymax] that each rank should own
rank_bounds = {}
for prank in range(comm.size):
    i, j = prank % px, prank // px
    rank_bounds[prank] = (i, i + 1, j, j + 1)
xmin, xmax, ymin, ymax = rank_bounds[comm.rank]

# Keep only the cells whose lower-left and upper-right vertices lie inside this rank's box
conds = np.logical_and(
    np.logical_and(coords[global_cells][:, 0, 0] >= xmin, coords[global_cells][:, -1, 0] <= xmax),
    np.logical_and(coords[global_cells][:, 0, 1] >= ymin, coords[global_cells][:, -1, 1] <= ymax),
)
cells = global_cells[conds]


def partitioner(comm, nparts, local_graph, num_ghost_nodes):
    """Leave cells on the current rank."""
    dest = np.full(len(cells), comm.rank, dtype=np.int32)
    return dolfinx.graph.create_adjacencylist(dest)


mesh = dolfinx.mesh.create_mesh(comm, cells, coords, domain, partitioner)
```
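This works as intended when run with `mpirun -n 4 python3 script.py` (the assert requires exactly `px * py` processes): each rank ends up owning only the cells of its own unit square.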
However, is there a way to do the same with a mesh object created by `dolfinx.mesh.create_rectangle`?
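For what it's worth, `create_rectangle` seems to also take a `partitioner` keyword, so this is a sketch of what I have in mind (the keyword, the callback signature, and the `num_nodes` attribute are assumptions on my part, I am not sure they are correct):

```python
from mpi4py import MPI
import numpy as np
import dolfinx

comm = MPI.COMM_WORLD


def partitioner(comm, nparts, local_graph, num_ghost_nodes):
    # Sketch: keep every locally created cell on the current rank.
    # Here I only see the local graph, not the cell coordinates, so I do
    # not know how to assign cells to ranks by position.
    dest = np.full(local_graph.num_nodes, comm.rank, dtype=np.int32)
    return dolfinx.graph.create_adjacencylist(dest)


mesh = dolfinx.mesh.create_rectangle(
    comm,
    [[0.0, 0.0], [2.0, 2.0]],
    [200, 200],  # 200 = 2 * (N - 1) cells per direction, matching the resolution above
    cell_type=dolfinx.mesh.CellType.quadrilateral,
    partitioner=partitioner,
)
```

As far as I understand, `create_rectangle` decides internally which cells each rank builds, so simply keeping the locally created cells does not give me the layout described above.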
Also, where can I find documentation concerning the `partitioner` object syntax? I was not able to find it.