Hello!
The cell-to-vertex mapping obtained from domain.topology.connectivity(dim, 0) gives a different output from domain.geometry.dofmap. Here is my code:
from dolfinx import mesh, fem, la
import numpy as np
import ufl
from mpi4py import MPI
filepath = "results_new/"
filename = "mass_output"
fl = open(filepath + filename + '.txt', 'a')
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# create mesh
domain = mesh.create_unit_cube(comm, 2, 2, 2, ghost_mode=mesh.GhostMode.shared_facet)
# dimension of mesh/domain
dim = domain.topology.dim
# initialize mesh topology connectivity
domain.topology.create_connectivity(dim, 0)
domain.topology.create_connectivity(0, dim)
# get connectivity matrix from topology
cell_to_vertices_topology = domain.topology.connectivity(dim, 0)
# index map for cells (local and ghost counts)
num_cells = domain.topology.index_map(dim)
# cell to vertex map from geometry
dof_map = domain.geometry.dofmap
cell_to_vertices_dof_map = np.array(dof_map)
fl.write(f"rank: {rank} \n Using topology \t Using geometry dof map \n")
for i in range(num_cells.size_local + num_cells.num_ghosts):
    fl.write(f"{cell_to_vertices_topology.links(i)} \t {cell_to_vertices_dof_map[i]} \n")
I calculated the mass vector using both mappings, as in the code below, and the two give different outputs:
from dolfinx import mesh, fem, la
import numpy as np
import ufl
from mpi4py import MPI
filepath = "results_new/"
filename = "mass_output"
fl = open(filepath + filename + '.txt', 'a')
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# create mesh
domain = mesh.create_unit_cube(comm, 2, 2, 2, ghost_mode=mesh.GhostMode.shared_facet)
# dimension of mesh/domain
dim = domain.topology.dim
# definition of function spaces and density
degree = 1
rho = 1.0  # density; constant value assumed here (not defined in the original script)
W = fem.functionspace(domain, ("P", degree))
DG = fem.functionspace(domain, ("DG", 0))
# Volume of each cell
cell_volume_form = fem.form(ufl.TestFunction(DG) * ufl.dx)
volumes = fem.assemble_vector(cell_volume_form)
# initialize mesh topology connectivity
domain.topology.create_connectivity(dim, 0)
domain.topology.create_connectivity(0, dim)
# get connectivity matrix from topology
cell_to_vertices_topology = domain.topology.connectivity(dim, 0)
# index map for cells (local and ghost counts)
num_cells = domain.topology.index_map(dim)
# initialize mass vector
mass_topology = fem.Function(W)
for cell_index in range(num_cells.size_local + num_cells.num_ghosts):
    cell_vertices = cell_to_vertices_topology.links(cell_index)
    volume = volumes.array[cell_index]
    # calculate mass contribution per vertex
    mass_per_vertex = rho * volume / len(cell_vertices)
    mass_topology.x.array[cell_vertices] += mass_per_vertex
mass_topology.x.scatter_reverse(la.InsertMode.add)
mass_topology.x.scatter_forward()
fl.write(f"Rank {rank}: \n Mass vector using topology: \n {mass_topology.x.array[:]}\n")
# cell to vertex map from geometry
dof_map = domain.geometry.dofmap
cell_to_vertices_dof_map = np.array(dof_map)
# initialize mass vector
mass_dof_map = fem.Function(W)
for cell_index in range(num_cells.size_local + num_cells.num_ghosts):
    cell_vertices = dof_map[cell_index]
    volume = volumes.array[cell_index]
    # calculate mass contribution per vertex
    mass_per_vertex = rho * volume / len(cell_vertices)
    mass_dof_map.x.array[cell_vertices] += mass_per_vertex
mass_dof_map.x.scatter_reverse(la.InsertMode.add)
mass_dof_map.x.scatter_forward()
fl.write(f"Mass vector using dof map: \n {mass_dof_map.x.array[:]}\n")
mass_form = fem.form(ufl.TestFunction(W) * rho * ufl.dx)
mass_vector = fem.assemble_vector(mass_form)
mass_vector.scatter_reverse(la.InsertMode.add)
mass_vector.scatter_forward()
fl.write(f"Mass vector using assemble vector: \n {mass_vector.array[:]} \n \n")
I compared the results with a single-processor run, which shows that the mass vector obtained from geometry.dofmap is the correct one.
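For reference, the serial check was essentially the following (a rough comparison I added, assuming all three arrays follow the dof ordering of W):
# serial check (my addition): compare the hand-assembled vectors against assemble_vector
print("dof map  vs assembled:", np.allclose(mass_dof_map.x.array, mass_vector.array))
print("topology vs assembled:", np.allclose(mass_topology.x.array, mass_vector.array))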
Please help me resolve this problem, as I need topology.connectivity between entities of different dimensions to communicate data between processors. If this has already been resolved, where can I find the fix and how can I integrate it into the current version?
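For context, this is roughly the kind of vertex-based exchange I want to build on top of topology.connectivity and the vertex index map. It is only a sketch of my intent, assuming la.vector can be created directly from the vertex index map:
# sketch (my own attempt, not verified): accumulate a per-vertex quantity
# cell by cell via topology.connectivity(dim, 0), then sum it across ranks
vertex_map = domain.topology.index_map(0)
vertex_data = la.vector(vertex_map)  # one value per local + ghost vertex
c_to_v = domain.topology.connectivity(dim, 0)
for c in range(domain.topology.index_map(dim).size_local):
    for v in c_to_v.links(c):
        vertex_data.array[v] += 1.0  # placeholder contribution per cell
vertex_data.scatter_reverse(la.InsertMode.add)  # add ghost contributions to owning ranks
vertex_data.scatter_forward()  # refresh ghost values from owners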
I am using the following FEniCSx setup:
OS: Ubuntu 24.04.2 LTS
FEniCSx v0.9.0 installed via conda
command to run: mpirun -np 2 python3 test.py
Thank you in advance!