Hi all,
I want to transfer the shape functions of first-order discontinuous Galerkin elements (DG, 1) to the indices of the actual mesh, i.e. into a matrix of size (number of nodes × number of elements). I noticed I can do this by using the cell-to-vertex connectivity (c2v) mentioned in this post: Application of point forces, mapping vertex indices to corresponding DOFs.
For the MWE shown below this works seamlessly in serial, although it is very slow for large-scale problems, which I understand can be improved with Numba's JIT (see the sketch after the MWE).
# Imports
import pyvista
from mpi4py import MPI
from dolfinx import mesh, fem, plot, io, default_scalar_type
import ctypes
import meshio
from petsc4py import PETSc
import numpy as np
import dolfinx
import ufl
from basix.ufl import element, mixed_element
from dolfinx import la
from dolfinx.fem import (Expression, Function, FunctionSpace, dirichletbc,
                         form, functionspace, locate_dofs_topological, Constant,
                         assemble_scalar, assemble)
from dolfinx.fem.petsc import (apply_lifting, assemble_matrix, assemble_vector,
                               set_bc)
from dolfinx.io import XDMFFile
from dolfinx.mesh import (CellType, GhostMode, create_box, create_rectangle,
                          locate_entities_boundary, meshtags)
from ufl import dx, grad, dot, inner, SpatialCoordinate, Measure, inv
from ufl import as_matrix, as_vector
from ufl import TrialFunction, TestFunction
from dolfinx.fem.petsc import LinearProblem
from ufl import sqrt
from contextlib import ExitStack
import matplotlib.pyplot as plt
import csv
import os
import time
dtype = PETSc.ScalarType
comm = MPI.COMM_WORLD
rank = comm.rank
Len = 3; Wid = 1
nn = 1
nx = 75*nn; ny = 25*nn
# Create a box Mesh:
domain = create_rectangle(MPI.COMM_WORLD, [np.array([0.0, 0.0]), np.array([Len, Wid])],
                          [nx, ny], CellType.triangle, ghost_mode=GhostMode.shared_facet)
# Save the mesh to XDMF file
with XDMFFile(MPI.COMM_WORLD, "mesh.xdmf", "w") as xdmf_file:
    xdmf_file.write_mesh(domain)
# Number of non-overlapping local elements
num_elements = domain.topology.index_map(2).size_local
num_nodes = domain.topology.index_map(0).size_local
# Summing up the counts from all processes in MPI_COMM_WORLD
num_elements_global = MPI.COMM_WORLD.allreduce(num_elements, op=MPI.SUM)
num_nodes_global = MPI.COMM_WORLD.allreduce(num_nodes, op=MPI.SUM)
# Number of local elements including the ghost elements
num_cells = domain.topology.index_map(domain.topology.dim).size_local + domain.topology.index_map(domain.topology.dim).num_ghosts
num_vertices = domain.topology.index_map(0).size_local + domain.topology.index_map(0).num_ghosts
print(f"Number of local non-overlapping elements: {num_elements}")
print(f"Number of local non-overlapping nodes: {num_nodes}")
print(f"Number of global elements: {num_elements_global}")
print(f"Number of global nodes: {num_nodes_global}")
print(f"Number of local elements including ghosts: {num_cells}")
print(f"Number of local nodes including ghosts: {num_vertices}")
# FEspace
V = functionspace(domain, ("CG", 1, (domain.geometry.dim,)))
Vh1 = functionspace(domain, ("CG", 1))
Vh1dc = functionspace(domain, ("DG", 1))
Vh0 = functionspace(domain, ("DG", 0))
# volume of the element
varea = TestFunction(Vh0)
cell_area_form = form(varea*dx)
cell_area = assemble_vector(cell_area_form)
cell_area.assemble()
# shape functions of discontinuous elements
etaf = TestFunction(Vh1dc)
Rhdc_form = form(etaf*dx)
Rhdc = assemble_vector(Rhdc_form)
Rhdc.assemble()
print(f"Length of Rhdc array : {len(Rhdc.array)}")
#connectivity
c2v = domain.topology.connectivity(domain.topology.dim, 0)
print(f"Length of c2v : {len(c2v)}")
Trf = np.zeros((num_nodes_global, num_elements_global))
num_non_zeros = 0
for cell in range(num_cells):
    vertices = c2v.links(cell)
    dofs = Vh1dc.dofmap.cell_dofs(cell)
    Trf[vertices[0]][cell] = Rhdc[dofs[0]]
    Trf[vertices[1]][cell] = Rhdc[dofs[1]]
    Trf[vertices[2]][cell] = Rhdc[dofs[2]]
    num_non_zeros += np.count_nonzero(Trf[:, cell])
print(num_non_zeros)
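For reference, this is the kind of Numba-jitted version of the fill loop I have in mind to speed up the serial case. It is only a sketch under a few assumptions: the cells are triangles (3 vertices / 3 DG1 dofs per cell), c2v.array and Vh1dc.dofmap.list can be viewed as (num_cells, 3) integer arrays, and fill_trf is just a name I made up.

# Sketch only: Numba-jitted fill loop for the serial case (no ghosts).
from numba import njit

@njit(cache=True)
def fill_trf(Trf, cells_vertices, cells_dofs, rhdc):
    # Assign the integrated DG1 shape-function values to (vertex, cell) entries
    for cell in range(cells_vertices.shape[0]):
        for k in range(3):
            Trf[cells_vertices[cell, k], cell] = rhdc[cells_dofs[cell, k]]

cells_vertices = c2v.array.reshape(-1, 3)                   # cell-to-vertex, one row per cell
cells_dofs = np.asarray(Vh1dc.dofmap.list).reshape(-1, 3)   # DG1 dofs, one row per cell
Trf = np.zeros((num_nodes_global, num_elements_global))
fill_trf(Trf, cells_vertices, cells_dofs, Rhdc.array)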
But my major concern is that when I parallelize, I get an error.
For example, when running on 2 processes, here is the reported error:
Number of local non-overlapping elements: 1873
Number of local non-overlapping nodes: 989
Number of global elements: 3750
Number of global nodes: 1976
Number of local elements including ghosts: 1898
Number of local nodes including ghosts: 1023
Length of Rhdc array : 5619
Length of c2v : 1898
Trf[vertices[0]][cell] = Rhdc[dofs[0]]
~~~~^^^^^^^^^
File "petsc4py/PETSc/Vec.pyx", line 117, in petsc4py.PETSc.Vec.__getitem__
File "petsc4py/PETSc/petscvec.pxi", line 446, in petsc4py.PETSc.vec_getitem
File "petsc4py/PETSc/petscvec.pxi", line 398, in petsc4py.PETSc.vecgetvalues
petsc4py.PETSc.Error: error code 63
[1] VecGetValues() at /home/conda/feedstock_root/build_artifacts/petsc_1709043572817/work/src/vec/vec/interface/rvector.c:957
[1] VecGetValues_MPI() at /home/conda/feedstock_root/build_artifacts/petsc_1709043572817/work/src/vec/vec/impls/mpi/pdvec.c:823
[1] Argument out of range
[1] Can only get local values, trying 0
After analyzing the code, I think the issue arises from an incompatibility between Rhdc (which only holds values for the locally owned dofs) and c2v (which covers the local elements plus the ghost elements).
So, is there a way to get c2v for only the locally owned elements, build the local matrix, and then assemble it into the global Trf matrix?
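In case it helps to see what I am aiming at, here is a rough sketch of the parallel version I am considering (untested, so please correct me if it is wrong). It assumes that for DG1 the dofs of locally owned cells are themselves locally owned (so they can be read from Rhdc.array), that local_to_global on the vertex index map and local_range on the cell index map give the right global numbering, and that summing the per-rank matrices with Allreduce recovers the global Trf.

# Rough sketch (untested): fill Trf from locally owned cells only,
# map local vertex/cell indices to their global numbering,
# then sum the per-rank contributions.
tdim = domain.topology.dim
cell_map = domain.topology.index_map(tdim)
vertex_map = domain.topology.index_map(0)

Trf = np.zeros((num_nodes_global, num_elements_global))
cell_offset = cell_map.local_range[0]   # global index of the first owned cell
rhdc_local = Rhdc.array                 # owned part of the PETSc vector

for cell in range(cell_map.size_local):                    # owned cells only, no ghosts
    vertices_glb = vertex_map.local_to_global(c2v.links(cell))
    dofs = Vh1dc.dofmap.cell_dofs(cell)                    # local DG1 dofs of this owned cell
    for k in range(3):
        Trf[vertices_glb[k], cell + cell_offset] = rhdc_local[dofs[k]]

# Each (vertex, cell) entry is set by exactly one rank, so a sum gives the global matrix
comm.Allreduce(MPI.IN_PLACE, Trf, op=MPI.SUM)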
I would really appreciate any help. Thanks!