Spack installation problem for Dolfinx 0.9.0 - compile options error?

Good day everyone. I am trying to work through a Spack installation of FEniCSx 0.9.0 with PETSc 3.24.1 on an Ubuntu 24.04 system. The compilation process finishes without a hitch, but running an example crashes.

First the install parameters:

spack add petsc@3.24.1 -trilinos +complex +fortran +mumps +superlu-dist
spack add py-fenics-dolfinx@0.9.0 +petsc4py +slepc4py
spack add py-mpi4py@4.0.1
spack add py-numpy@2.1.2
spack add py-matplotlib
spack add py-pandas
spack add py-gmsh
spack install

I then run the following code (file with name Rect1.py) on one processor with the debugger

mpirun -np 1 python3 Rect1.py

and I see that the LU factorization stage fails immediately with a SEGV fault. Mesh building, matrix and vector assemblies execute without error. Is there some incompatibility between Dolfinx 0.9.0 and PETSc 3.24.1? Am I missing something in the compile configuration?

The code that I run:

# Example to test multi-mode waveguide absorbing boundary condition
# for a simple rectangular waveguide
import pdb
import sys
import numpy as np
from mpi4py import MPI as M
from petsc4py import PETSc as P
import gmsh
import ufl as U
import basix.ufl
from dolfinx.io import gmshio
from dolfinx.fem import petsc
from dolfinx import fem
from dolfinx import io
import time

eta0 = 377.0 # free space wave impedance

class ModesTE:
    """Rectangular-waveguide TE_mn mode field evaluator.

    Parameters: beta (mode propagation constant), k (free-space
    wavenumber), a/b (guide cross-section dimensions), m/n (mode
    indices).  eval_H / eval_E return (x, y, z) field components
    sampled at a coordinate array laid out as DOLFINx interpolation
    callbacks expect (x[0] = x-coordinates, x[1] = y-coordinates).
    """

    def __init__(self, beta, k, a, b, m, n):
        self.beta = beta
        self.k = k
        self.a = a
        self.b = b
        self.m = m
        self.n = n

    def _norm_const(self, scale=1.0):
        # Mode normalization amplitude shared by eval_H and eval_E;
        # `scale` lets eval_E fold the wave impedance into the
        # normalization, matching the original formulas.
        if self.m != 0 and self.n != 0:
            return np.sqrt(self.n * self.m * 4.0 / (scale * self.a * self.b))
        if self.m == 0:
            return np.sqrt(self.n * 2.0 / (scale * self.a * self.b))
        return np.sqrt(self.m * 2.0 / (scale * self.a * self.b))

    def eval_H(self, x):
        """Transverse magnetic field (hx, hy, hz) of the TE_mn mode."""
        B = self._norm_const()
        hx = B * np.sin((np.pi * self.m / self.a) * x[0]) * np.cos((np.pi * self.n / self.b) * x[1])
        hy = B * np.cos((np.pi * self.m / self.a) * x[0]) * np.sin((np.pi * self.n / self.b) * x[1])
        # No longitudinal component in this transverse-field model.
        hz = np.full_like(hx, 0.0 + 0j, dtype=np.complex128)
        return (hx, hy, hz)

    def eval_E(self, x):
        """Transverse electric field (ex, ey, ez) of the TE_mn mode.

        Fixes two defects in the original: ZTE was used before it was
        assigned, and ez was built from the undefined name `hx`.
        """
        # TE wave impedance (eta0 is the module-level free-space
        # impedance); must be computed before the normalization below.
        ZTE = eta0 * self.k / self.beta
        B = self._norm_const(scale=ZTE)
        ey = -ZTE * B * np.sin((np.pi * self.m / self.a) * x[0]) * np.cos((np.pi * self.n / self.b) * x[1])
        ex = ZTE * B * np.cos((np.pi * self.m / self.a) * x[0]) * np.sin((np.pi * self.n / self.b) * x[1])
        ez = np.full_like(ex, 0.0 + 0j, dtype=np.complex128)
        return (ex, ey, ez)

# --- Problem parameters ---
# NOTE(review): units appear to be mm and GHz (c = 300 mm/ns) — confirm.
eps = 1.0e-4  # geometric tolerance for classifying boundary faces
a = 22.86 #WG width
b = 10.16 #WG height
l = 50.8 # WG length
f = 10.5 #frequency of operation GHz
c = 300.0 #speed of light
lm = 1.60  # target mesh size at the geometry's corner points

comm = M.COMM_WORLD
mpiRank = comm.rank
modelRank = 0 # where GMSH runs
k0 = 2.0 * np.pi * f / c #wavenumber wrt frequency

# Build the geometry and mesh with gmsh on modelRank only; the other
# ranks receive their partition from gmshio.model_to_mesh below.
if mpiRank == modelRank:
    gmsh.initialize()
    gmsh.option.setNumber('General.Terminal', 1)
    gmsh.model.add('Waveguide')
    # Rectangular box a x b x l: the waveguide volume (tag 1).
    gmsh.model.occ.addBox(0, 0, 0, a, b, l, 1)
    gmsh.model.occ.synchronize()
    pt = gmsh.model.getEntities(dim=0)
    gmsh.model.mesh.setSize(pt, lm)

    gmsh.option.setNumber('Mesh.MeshSizeMin', 0.01)
    gmsh.option.setNumber('Mesh.MeshSizeMax', 2.2)
    gmsh.option.setNumber('Mesh.Algorithm', 6) #1=Mesh Adapt, 2=Auto, 3=Initial mesh only, 5=Delaunay, 6=Frontal-Delaunay
    gmsh.option.setNumber('Mesh.MinimumCirclePoints', 30)
    gmsh.option.setNumber('Mesh.CharacteristicLengthFromCurvature', 1)
    gmsh.option.setNumber('Mesh.Tetrahedra', 0)
    # Classify boundary faces by the z-coordinate of their center of
    # mass: z == 0 -> input port, z == l -> output port, else PEC wall.
    OUT = []
    IN = []
    PEC = []
    bb = gmsh.model.getEntities(dim=3)
    pt = gmsh.model.getBoundary(bb, combined=True, oriented=False, recursive=False)
    for bnd in pt:
        CoM = gmsh.model.occ.getCenterOfMass(bnd[0], bnd[1])
        if np.abs(CoM[2]) < eps:
            IN.append(bnd[1])
        elif np.abs(CoM[2] - l) < eps:
            OUT.append(bnd[1])
        else:
            PEC.append(bnd[1])

    # Physical groups: volume tag 1; facet tags 1=PEC, 2=inport, 3=outport.
    gmsh.model.addPhysicalGroup(3, [1], 1) #WG
    gmsh.model.setPhysicalName(3, 1, "Waveguide")
    gmsh.model.addPhysicalGroup(2, PEC, 1)
    gmsh.model.setPhysicalName(2, 1, "PEC")
    gmsh.model.addPhysicalGroup(2, IN, 2)
    gmsh.model.setPhysicalName(2, 2, "INPORT")
    gmsh.model.addPhysicalGroup(2, OUT, 3)
    gmsh.model.setPhysicalName(2, 3, "OUTPORT")

    gmsh.model.mesh.generate(3)
    # NOTE(review): opens the gmsh GUI and blocks until it is closed.
    gmsh.fltk.run()

# Convert the gmsh model to a distributed DOLFINx mesh with cell tags
# (ct) and facet tags (fm); collective over comm.
mesh, ct, fm = gmshio.model_to_mesh(gmsh.model, comm, modelRank, gdim=3)
if mpiRank == modelRank:
    gmsh.finalize()

# Facet-to-cell connectivity is required for boundary facet lookups.
mesh.topology.create_connectivity(mesh.topology.dim-1, mesh.topology.dim)
# Second-order Nedelec H(curl) elements for the vector field unknown.
elem = basix.ufl.element('Nedelec 1st kind H(curl)', mesh.basix_cell(), degree=2)
V = fem.functionspace(mesh, elem)
u = U.TrialFunction(V)
v = U.TestFunction(V)
normal = U.FacetNormal(mesh)
ds = U.Measure("ds", domain=mesh, subdomain_data = fm)
# Dirichlet BCs
# Zero tangential field on the PEC walls (facet tag 1).
facets = fm.find(1)
ubc = fem.Function(V)
ubc.interpolate(lambda x: np.array([0*x[0], 0*x[1], 0*x[2]], dtype=np.complex128))
dofs = fem.locate_dofs_topological(V, mesh.topology.dim-1, facets)
bc = fem.dirichletbc(ubc, dofs)
# Build rhs
uh = fem.Function(V, dtype=np.complex128)
# Propagation constant of the TE10 mode.
kz = np.sqrt(k0 * k0 - (np.pi / a)**2)
# NOTE(review): rebinds `f` (previously the frequency) to the mode object.
f = ModesTE(kz, k0, a, b, 1, 0)
uh.interpolate(f.eval_H)
# Incident-field excitation on the input port (facet tag 2).
finc = 2.0j * k0 * eta0 * U.inner(U.cross(normal, uh), v) * ds(2)
uh.name = "H_inc"
# Port boundary terms on the input (2) and output (3) ports.
Lb = 1.0j * kz * U.inner(U.cross(normal, u), U.cross(normal, v)) * ds(2)
Lc = 1.0j * kz * U.inner(U.cross(normal, u), U.cross(normal, v)) * ds(3)
t_start = time.perf_counter()

# Weak form
# curl-curl Helmholtz operator plus the port boundary terms above.
L = (U.inner(U.curl(u), U.curl(v)) - k0 * k0 * U.inner(u, v)) * U.dx
Df = fem.form(L+Lb+Lc)
Rf = fem.form(finc)

num_dofs_local = V.dofmap.index_map.size_local * V.dofmap.index_map_bs
num_dofs_global = V.dofmap.index_map.size_global * V.dofmap.index_map_bs
print(f"Number of dofs (owned) by rank {mpiRank}: {num_dofs_local}")
if mpiRank == modelRank:
    print(f"Number of dofs global: {num_dofs_global}")

def VecCreate(linear_form, bilin_form, BCs):
    """Assemble `linear_form` into a PETSc vector and apply `BCs`.

    `bilin_form` is accepted for the (currently disabled) lifting step
    and is otherwise unused.
    """
    rhs = petsc.create_vector(linear_form)
    petsc.assemble_vector(rhs, linear_form)
    # petsc.apply_lifting(rhs, [bilin_form], [[BCs]])  # lifting disabled
    # Accumulate ghost-entry contributions onto the owning ranks.
    rhs.ghostUpdate(addv=P.InsertMode.ADD_VALUES, mode=P.ScatterMode.REVERSE)
    petsc.set_bc(rhs, [BCs])
    # Push boundary values back out to the ghosted entries.
    rhs.ghostUpdate(addv=P.InsertMode.INSERT, mode=P.ScatterMode.FORWARD)
    return rhs

# Assemble the system matrix with Dirichlet rows/columns handled.
A = petsc.assemble_matrix(Df, bcs=[bc])
A.assemble()
# NOTE(review): rebinds `b` (previously the waveguide height) to the RHS vector.
b = VecCreate(Rf, Df, bc)

# Direct solve: preonly KSP with a MUMPS LU factorization.
# NOTE(review): per the follow-up in this thread, the
# "pc_factor_nonzeros_along_diagonal" option combined with the "aij"
# matrix type is what triggers the segfault in solver.setUp().
petsc_options = {
        "ksp_type": "preonly",
        "pc_type": "lu",
        "pc_factor_mat_solver_type": "mumps",
        "pc_factor_nonzeros_along_diagonal": 1e-8,
        "ksp_error_if_not_converged": True,
        "ksp_monitor": None }
opts = P.Options()
for key, value in petsc_options.items():
    opts[key] = value

solver = P.KSP().create(mesh.comm)
solver.setOperators(A)
solver.setFromOptions()
# Drop into the debugger just before the factorization is set up.
pdb.set_trace()
solver.setUp()
print("***Made it to this point***")
t_stop = time.perf_counter()

print("Solver setup time = {0}".format(t_stop - t_start))
sol = fem.Function(V)
sol.name="Solution"
solver.solve(b, sol.x.petsc_vec)
solver.view()
# Propagate owned solution values to ghost entries.
sol.x.petsc_vec.ghostUpdate(addv=P.InsertMode.INSERT, mode=P.ScatterMode.FORWARD)
t_solve = time.perf_counter()

print("Solve time = {0}".format(t_solve - t_stop))
# Interpolate the H(curl) solution onto a piecewise-constant vector
# space for XDMF visualization output.
Vw = basix.ufl.element('DG', mesh.basix_cell(), 0, shape=(mesh.geometry.dim, ))
W = fem.functionspace(mesh, Vw)
Et = fem.Function(W)
Et.interpolate(sol)
Et.name = "ElectricField"
with io.XDMFFile(mesh.comm, "Efield.xdmf", "w") as xx:
    xx.write_mesh(mesh)
    xx.write_function(Et)

sys.exit(0)

The error message:

-> solver.setUp()
n
--------------------------------------------------------------------------
prterun noticed that process rank 0 with PID 66273 on node bandicoot exited on
signal 11 (Segmentation fault).
--------------------------------------------------------------------------
(Pdb)

I can’t specify -trilinos, as it is not a variant in the petsc spack:

With an almost similar environment, i.e.

# This is a Spack Environment file.
#
# It describes a set of packages to be installed, along with
# configuration settings.
spack:
  # add package specs to the `specs` list
  specs:
  - py-fenics-dolfinx@0.9.0+petsc4py+slepc4py ^petsc@3.24.1+complex+mumps+superlu-dist
  - py-mpi4py@4.0.1
  - py-numpy@2.1.2

(I installed gmsh with pip as it is way faster to do so)
I can reproduce the segfault.
The segfault disappears if I remove:

`"pc_factor_nonzeros_along_diagonal": 1e-8,`

from your options settings.

I can reproduce the same error on the stable build (0.10.0) in docker (with a few minor tweaks to your code).

This can also be reproduced with this minimal code:

# Minimal DOLFINx reproducer: Poisson on a unit square solved with
# preonly + LU (MUMPS); the "pc_factor_nonzeros_along_diagonal" option
# below is what triggers the segfault.
from mpi4py import MPI
import dolfinx.fem.petsc
import ufl
import numpy as np
mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 10, 10)

V = dolfinx.fem.functionspace(mesh, ("Lagrange", 1))
u, v = ufl.TrialFunction(V), ufl.TestFunction(V)

a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx
# Pick a source constant matching the build's scalar type.
dtype = dolfinx.default_scalar_type
if np.issubdtype(dtype, np.complexfloating):
    f = dolfinx.fem.Constant(mesh, 1.0 + 2.0j)
else:
    f = dolfinx.fem.Constant(mesh, 1.0)
L = ufl.inner(f, v) * ufl.dx


petsc_options = {
        "ksp_type": "preonly",
        "pc_type": "lu",
        "pc_factor_mat_solver_type": "mumps",
        "pc_factor_nonzeros_along_diagonal": 1e-8,
        "ksp_error_if_not_converged": True,
        "ksp_monitor": None }

problem = dolfinx.fem.petsc.LinearProblem(a, L, bcs=[], petsc_options=petsc_options, petsc_options_prefix="mwe_")
problem.solve()

which runs fine with "superlu_dist" instead of "mumps", meaning that there is likely a bug in the PETSc mumps interface.

Simplifying further, I can make a “pure” PETSc example where this fails:

# Pure-PETSc reproducer: MUMPS LU on a serial 4x4 identity segfaults
# when "pc_factor_nonzeros_along_diagonal" is set on an "aij" matrix.
from mpi4py import MPI
from petsc4py import PETSc
import numpy as np

assert MPI.COMM_WORLD.size == 1
M = 4
A = PETSc.Mat().create(MPI.COMM_WORLD)
A.setSizes([[4, None], [4, None]])
A.setType("aij")
vec = PETSc.Vec().create(MPI.COMM_WORLD)
vec.setType("mpi")
vec.setSizes(M)

vec.set(1.0)

# A becomes the 4x4 identity.
A.setDiagonal(vec)
A.assemble()

b = PETSc.Vec().create(MPI.COMM_WORLD)
b.setSizes(M)
b.setType("mpi")
b.array_w[:] = np.arange(len(b.array_r))

petsc_options = {
        "ksp_type": "preonly",
        "pc_type": "lu",
        "pc_factor_mat_solver_type": "mumps",
        "pc_factor_nonzeros_along_diagonal": 1e-8,
        "ksp_error_if_not_converged": True,
        "ksp_monitor": None }

x = b.duplicate()

ksp = PETSc.KSP().create(MPI.COMM_WORLD)
ksp.setOperators(A)
# NOTE(review): this first setFromOptions() runs before the options are
# inserted into the database; the second call below picks them up.
ksp.setFromOptions()
for key, value in petsc_options.items():
    PETSc.Options().setValue(key, value)
ksp.setFromOptions()
ksp.setUp()
ksp.solve(b, x)

turns out, the issue is using the "pc_factor_nonzeros_along_diagonal" option with matrix type "aij".

Thus, in your code, instead of using:

`A = petsc.assemble_matrix(Df, bcs=[bc])`

I would call:

A = petsc.create_matrix(Df, "mpiaij")
petsc.assemble_matrix(A, Df, bcs=[bc])

which then resolves the issue for me.

EDIT: Petsc issue tracked at: Mumps with mattype aij and pc_factor_nonzeros_along_diagonal segfaults (#1829) · Issues · PETSc / petsc · GitLab

Thanks @Dokken. The “trilinos” line was a typo on my part. It should have been ~trilinos (suggested by an installation guide I found online).

Anyway, the code works with the fixes that you mentioned. However, there is still a problem. In reality, I wanted to use the newer version of PETSc to solve problems using MUMPS with nested matrices. This “PETSc only” code where I form a nested matrix system crashes with a segmentation fault. Perhaps I am incorrectly specifying the matrices, but I fear that MUMPS has a problem with the nested matrix construction.

# Reproducer: MUMPS LU on a MATNEST (2x2 block) operator — segfaults in
# solver.setUp() when run in parallel.
import sys
import numpy as np
from mpi4py import MPI as M
from petsc4py import PETSc as P

comm = M.COMM_WORLD
mpiRank = comm.rank

N = 100 # Matrix order
BW = 6 # matrix bandwidth fill

# NOTE(review): createAIJ(size=(rows, cols)) here passes only global
# sizes; petsc4py decides the local row distribution itself.
U = P.Mat().createAIJ(size=(N, 2))
V = P.Mat().createAIJ(size=(N, N))
Id = P.Mat().createAIJ(size=(2,2))

U.setUp()
V.setUp()
Id.setUp()

# 2x2 identity coupling block.
Id.setValue(0, 0, 1, addv=P.InsertMode.INSERT_VALUES)
Id.setValue(1, 1, 1, addv=P.InsertMode.INSERT_VALUES)

rnd = np.random.default_rng(seed=109987091)

ix = V.getOwnershipRanges()
iy = U.getOwnershipRanges()
# Fill V with a random band of half-width BW; each rank inserts only
# into its locally owned rows.
for i in range(N):
    for k in range(-BW, BW):
        j = i + k
        if (i >= ix[mpiRank]) and (i < ix[mpiRank+1]):
            if(j >= 0) and (j < N):
                aa = rnd.standard_normal(1)[0]
                V.setValue(i, j, aa, addv=P.InsertMode.INSERT_VALUES)
    # Fill a few rows of U around row N/2.
    if(i > N/2-BW-1) and (i <N/2+BW):
        if (i >= iy[mpiRank]) and (i < iy[mpiRank+1]):
            aa = rnd.standard_normal(1)[0]
            # NOTE(review): `j` is the stale inner-loop index (j == i+BW-1
            # here), not `i` — probably unintended; verify.
            U.setValue(j, 0, aa, addv=P.InsertMode.INSERT_VALUES)
            aa = rnd.standard_normal(1)[0]
            U.setValue(j, 1, aa, addv=P.InsertMode.INSERT_VALUES)

U.assemble()
V.assemble()
Id.assemble()

# NOTE(review): petsc4py Mat.transpose() with no output matrix works
# in place and returns the same object, so Ut aliases U here; copy
# first if the untransposed matrix is still needed.
Ut = U.transpose()

print(mpiRank, U.getOwnershipRanges(), V.getOwnershipRanges())

# Nested block operator [[V, U], [Ut, Id]].
A_new = P.Mat()
A_new.createNest([[V, U], [Ut, Id]], comm = comm)
A_new.setUp()


iz = A_new.getOwnershipRanges()
print(iz)

petsc_options = {
        "ksp_type": "preonly",
        "pc_type": "lu",
        "pc_factor_mat_solver_type": "mumps",
        "pc_factor_nonzeros_along_diagonal": 1e-8,
        "ksp_error_if_not_converged": True,
        "ksp_monitor": None }
opts = P.Options()
for key, value in petsc_options.items():
    opts[key] = value

solver = P.KSP().create(comm)
solver.setOperators(A_new)
solver.setFromOptions()
#Program crashes here
solver.setUp()

# Random right-hand side over the locally owned range of the nest.
b = P.Vec().create(comm=comm)
b.setSizes(N+2)
b.setUp()
for i in range(iz[mpiRank],iz[mpiRank+1]):
    aa = rnd.standard_normal(1)[0]
    b.setValue(i, aa, addv=P.InsertMode.INSERT_VALUES)
b.assemble()

x = b.duplicate()
solver.solve(b, x)

sys.exit(0)

In this case, removing pc_factor_nonzeros_along_diagonal has no effect.
The MPI ownership shows that the new matrices have the proper dimension, but MUMPS fails on the factorization. SuperLU will not work on nested matrices.

The output from the program is:

1 [0 1 2 2 2] [  0  25  50  75 100]
2 [0 1 2 2 2] [  0  25  50  75 100]
0 [0 1 2 2 2] [  0  25  50  75 100]
3 [0 1 2 2 2] [  0  25  50  75 100]
[  0  26  52  77 102]
[  0  26  52  77 102]
[  0  26  52  77 102]
[  0  26  52  77 102]
[2]PETSC ERROR: ------------------------------------------------------------------------
[2]PETSC ERROR: Caught signal number 11 SEGV: Segmentation Violation, probably memory access out of range
[2]PETSC ERROR: Try option -start_in_debugger or -on_error_attach_debugger
[2]PETSC ERROR: or see https://petsc.org/release/faq/#valgrind and https://petsc.org/release/faq/
[2]PETSC ERROR: configure using --with-debugging=yes, recompile, link, and run 
[2]PETSC ERROR: to get more information on the crash.
[2]PETSC ERROR: Run with -malloc_debug to check if memory corruption is causing the crash.
--------------------------------------------------------------------------
MPI_ABORT was invoked on rank 2 in communicator MPI_COMM_WORLD
  Proc: [[57550,1],2]
  Errorcode: 59

NOTE: invoking MPI_ABORT causes Open MPI to kill all MPI processes.
You may or may not see output from other processes, depending on
exactly when Open MPI kills them.
--------------------------------------------------------------------------
--------------------------------------------------------------------------
prterun has exited due to process rank 2 with PID 0 on node bandicoot calling
"abort". This may have caused other processes in the application to be
terminated by signals sent by prterun (as reported here).
--------------------------------------------------------------------------

If you think it more appropriate, I can move this last question to a new post.

The problem is how you set information to your petsc vectors and matrices.
Here is a working example that runs on one process:

# Working (serial) version: matrix sizes given as explicit
# ((local, global), (local, global)) pairs and the RHS built as a
# nested vector matching the MATNEST block layout.
import sys
import numpy as np
from mpi4py import MPI as M
from petsc4py import PETSc as P

comm = M.COMM_WORLD
mpiRank = comm.rank

N = 100 # Matrix order
BW = 6 # matrix bandwidth fill

# size=((local_rows, global_rows), (local_cols, global_cols)); local
# equals global here, so this is only correct on one process.
U = P.Mat().createAIJ(size=((N, N), (2, 2)))
V = P.Mat().createAIJ(size=((N, N), (N, N)))
Id = P.Mat().createAIJ(size=((2, 2), (2, 2)))

U.setUp()
V.setUp()
Id.setUp()

# 2x2 identity coupling block.
Id.setValue(0, 0, 1, addv=P.InsertMode.INSERT_VALUES)
Id.setValue(1, 1, 1, addv=P.InsertMode.INSERT_VALUES)

rnd = np.random.default_rng(seed=109987091)

ix = V.getOwnershipRanges()
iy = U.getOwnershipRanges()
# Random band of half-width BW in V; a few rows of U near row N/2.
for i in range(N):
    for k in range(-BW, BW):
        j = i + k
        if (i >= ix[mpiRank]) and (i < ix[mpiRank+1]):
            if(j >= 0) and (j < N):
                aa = rnd.standard_normal(1)[0]
                V.setValue(i, j, aa, addv=P.InsertMode.INSERT_VALUES)
    if(i > N/2-BW-1) and (i <N/2+BW):
        if (i >= iy[mpiRank]) and (i < iy[mpiRank+1]):
            aa = rnd.standard_normal(1)[0]
            # NOTE(review): `j` is the stale inner-loop index (i + BW - 1),
            # carried over from the original code; verify `i` was not meant.
            U.setValue(j, 0, aa, addv=P.InsertMode.INSERT_VALUES)
            aa = rnd.standard_normal(1)[0]
            U.setValue(j, 1, aa, addv=P.InsertMode.INSERT_VALUES)

U.assemble()
V.assemble()
Id.assemble()

# Mat.transpose() works in place, so keep a copy of the untransposed U.
Ut = U.copy()
U.transpose()

print(mpiRank, U.getOwnershipRanges(), V.getOwnershipRanges())

# Nested operator [[V, Ut], [U, Id]] (Ut is N x 2, U is now 2 x N).
A_new = P.Mat()
A_new.createNest([[V, Ut], [U, Id]], comm = comm)
A_new.setUp()



petsc_options = {
        "ksp_type": "preonly",
        "pc_type": "lu",
        "pc_factor_mat_solver_type": "mumps",
        #"pc_factor_nonzeros_along_diagonal": 1e-8,
        "ksp_error_if_not_converged": True,
        "ksp_monitor": None }
opts = P.Options()
for key, value in petsc_options.items():
    opts[key] = value

solver = P.KSP().create(comm)
solver.setOperators(A_new)
solver.setFromOptions()
#Program crashes here
solver.setUp()

# RHS as a nested vector with sub-vectors matching the block sizes.
b0 = P.Vec().create(comm=comm)
b0.setSizes(N)
b0.setUp()
b1 = P.Vec().create(comm=comm)
b1.setSizes(2)
b1.setUp()
b = P.Vec().createNest([b0, b1], comm=comm)
b.setUp()
# Fill each sub-vector through its local index range.
for bi in b.getNestSubVecs():
    aa = rnd.standard_normal(bi.getSize())
    indices = np.arange(bi.getLocalSize(), dtype=np.int32)
    bi.setValuesLocal(indices, aa, addv=P.InsertMode.INSERT_VALUES)
b.assemble()

x = b.duplicate()
solver.solve(b, x)

print(x.array_r, b.array_r)

sys.exit(0)

Note that this will not be correct in parallel, as the local and global size is set to the same.
Also note how you need to copy U to make its transpose (as it is done in-place).

Also note how you can set values to the local part of the sub vector.

Dear @Dokken, Thanks for the clarification! Indeed it works on a single processor. The question remains: how to do this in parallel on many processors. I assume I would need to specify how break up the matrix into chunks that correspond to the partitioning in the MPI environment. For parallel solution, would it be simpler to forgo the nested matrix formalism entirely and explicitly construct a new unified sparse matrix by extracting the nonzero elements from the constituent submatrices?

As your example is pure PETSc at the moment, with no DOLFINx, and no clear definition of partitioning, it is quite hard for me to give general advice. To me it seems like you want to add some specific spaces to couple to classical FEniCS spaces. What spaces in particular?

The goal is to incorporate a mode-matching boundary condition, which in the end, produces a rank-k modification of the global finite element matrix system of the form
\left\lbrack\begin{array}{cc} A & U^T \\ U & I\end{array}\right\rbrack\left\lbrace\begin{array}{c} e \\ c\end{array}\right\rbrace = \left\lbrace\begin{array}{c} e^\mathrm{inc} \\ 0\end{array}\right\rbrace
where A is the usual matrix operator generated by DOLFINx and I is the k dimensional identity matrix. The block U is a set of k vectors generated by a DOLFINx linear form given by
U_k = \iint\limits_{\partial\Omega} v\cdot h_k \, dS,
where \partial\Omega is the domain of boundary integration over a port where waves exit. k corresponds to the wave mode and h_k is a known wave function. The blocks in the LHS vector are e= DoFs of finite element system and c= are the wave mode coefficients on the boundary. E^\mathrm{inc} is the incident wave field. The linear form uses the same set of basis functions used to generate A. I (naively) thought it would be straightforward to build the above block system using the nested matrix. I realize that I need to take into account how the matrix/vector system is partitioned out to the various processors and I am trying to decide which approach I should use.

I would probably use the real function space (with a vector space) from scifem to generate these matrices. I think it should be doable, and would naturally take care of the parallel distribution of information.

I currently do not have a laptop at hand, so I cannot make an example.
I would suggest looking at

I might have time to cook up something tonight.

Thanks for the link. To give some background, the variational form I use is of the form
\mathit{L}=\iiint\limits_{\Omega}\left (\nabla\times E^*\cdot \nabla\times E-k_0^2\epsilon_r E^*\cdot E\right )\, dV + B(E, C)
where we have a boundary operator that includes incoming (known) and outgoing (unknown) waves:
B(E,C) = -2jk_0\eta_0\iint\limits_{\partial\Omega_i}E^*\times h^{inc}\cdot n\, dS + jk_0\eta_0 C\iint\limits_{\partial\Omega_o} E^*\times h\cdot n\, dS
and C (the wave amplitude on the boundary) is defined by the constraint
C - \iint\limits_{\partial\Omega_o}E\times h\cdot n\, dS= 0.

The function E is approximated using the Nedelec basis on tetrahedra. I need to study up a bit on how real function spaces are applied to this problem. Cheers and good evening!

EDIT: I forgot to add that the boundary in question generally covers a small number of DoFs, so the resulting boundary operator is still quite sparse.

Here is an example using a real space on the exterior facets of the mesh to create a (2, num_dofs_u) matrix with the first row being A_{0, j} = \int_{\partial\Omega}\frac{\partial u_j}{\partial x}~\mathrm{d}s and the second row being A_{1, j}=\int_{\partial\Omega}\frac{\partial u_j}{\partial y}~\mathrm{d}s

# Real-space example: assemble a (2 x num_dofs) matrix whose rows are
# the boundary integrals of du/dx and du/dy, using scifem's "real"
# function space on a submesh of the exterior facets.
from mpi4py import MPI
import dolfinx
import ufl
import scifem
import numpy as np

mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 4, 4)

V = dolfinx.fem.functionspace(mesh, ("Lagrange", 1))
u = ufl.TrialFunction(V)


mesh.topology.create_connectivity(mesh.topology.dim-1, mesh.topology.dim)
exterior_facets = dolfinx.mesh.exterior_facet_indices(mesh.topology)

# Mark every exterior facet with a single tag.
tag = 33
ft = dolfinx.mesh.meshtags(mesh, mesh.topology.dim-1, exterior_facets, np.full_like(exterior_facets, tag,dtype=np.int32))

# A submesh of the boundary carries the 2-component real space.
Gamma, entity_map, _, _ = dolfinx.mesh.create_submesh(mesh, mesh.topology.dim-1, exterior_facets)
Z = scifem.create_real_functionspace(Gamma, value_shape=(2,))

z0, z1 = ufl.TestFunctions(Z)


ds = ufl.Measure("ds", domain=mesh, subdomain_data=ft, subdomain_id=tag)
# NOTE(review): the test functions are not conjugated, so this form is
# rejected in complex builds (ArityMismatch); use ufl.inner there.
a = u.dx(0)*z0*ds + u.dx(1)*z1*ds

# entity_maps as a list is the v0.10-style API; v0.9 expects a dict.
a_compiled = dolfinx.fem.form(a, entity_maps=[entity_map])
A = dolfinx.fem.petsc.assemble_matrix(a_compiled)
A.assemble()
print(A.norm(0), A.norm(2))

Just tried out the code. I think it choked on the dolfinx-complex installation.

 File "/home/bill/spack/var/spack/environments/Fenics-090-complex/.spack-env/view/lib/python3.12/site-packages/ufl/algorithms/compute_form_data.py", line 427, in compute_form_data
    check_form_arity(preprocessed_form, self.original_form.arguments(), complex_mode)
  File "/home/bill/spack/var/spack/environments/Fenics-090-complex/.spack-env/view/lib/python3.12/site-packages/ufl/algorithms/check_arities.py", line 213, in check_form_arity
    check_integrand_arity(itg.integrand(), arguments, complex_mode)
  File "/home/bill/spack/var/spack/environments/Fenics-090-complex/.spack-env/view/lib/python3.12/site-packages/ufl/algorithms/check_arities.py", line 205, in check_integrand_arity
    raise ArityMismatch("Failure to conjugate test function in complex Form")
ufl.algorithms.check_arities.ArityMismatch: Failure to conjugate test function in complex Form

Is there a way to make it work for complex forms? Will it work for Nedelec basis functions?

You only need to change the variational form to something that support complex mode.
For instance:

# Complex-mode-safe variant: same real-space assembly, with the form
# written via ufl.inner so the test functions are properly conjugated.
from mpi4py import MPI
import dolfinx
import ufl
import scifem
import numpy as np

mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 4, 4)

V = dolfinx.fem.functionspace(mesh, ("Lagrange", 1))
u = ufl.TrialFunction(V)


mesh.topology.create_connectivity(mesh.topology.dim-1, mesh.topology.dim)
exterior_facets = dolfinx.mesh.exterior_facet_indices(mesh.topology)

# Mark every exterior facet with a single tag.
tag = 33
ft = dolfinx.mesh.meshtags(mesh, mesh.topology.dim-1, exterior_facets, np.full_like(exterior_facets, tag,dtype=np.int32))

Gamma, entity_map, _, _ = dolfinx.mesh.create_submesh(mesh, mesh.topology.dim-1, exterior_facets)
Z = scifem.create_real_functionspace(Gamma, value_shape=(2,))

z0, z1 = ufl.TestFunctions(Z)


ds = ufl.Measure("ds", domain=mesh, subdomain_data=ft, subdomain_id=tag)
# ufl.inner conjugates its second argument, keeping the form valid in
# complex mode.
a = ufl.inner(u.dx(0), z0)*ds + ufl.inner(u.dx(1), z1)*ds

# entity_maps as a list is the v0.10-style API; v0.9 expects a dict.
a_compiled = dolfinx.fem.form(a, entity_maps=[entity_map])
A = dolfinx.fem.petsc.assemble_matrix(a_compiled)
A.assemble()
print(A[:,:])
print(A.norm(0), A.norm(2))

Ref your Nedelec question, yes it will work. Here is an example

# Nedelec (N1curl) variant: the rows integrate the normal component of
# the trial field (with opposite signs) over the exterior boundary.
from mpi4py import MPI
import dolfinx
import ufl
import scifem
import numpy as np

mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 4, 4)

V = dolfinx.fem.functionspace(mesh, ("N1curl", 1))
u = ufl.TrialFunction(V)


mesh.topology.create_connectivity(mesh.topology.dim-1, mesh.topology.dim)
exterior_facets = dolfinx.mesh.exterior_facet_indices(mesh.topology)

# Mark every exterior facet with a single tag.
tag = 33
ft = dolfinx.mesh.meshtags(mesh, mesh.topology.dim-1, exterior_facets, np.full_like(exterior_facets, tag,dtype=np.int32))

Gamma, entity_map, _, _ = dolfinx.mesh.create_submesh(mesh, mesh.topology.dim-1, exterior_facets)
Z = scifem.create_real_functionspace(Gamma, value_shape=(2,))

z0, z1 = ufl.TestFunctions(Z)

n = ufl.FacetNormal(mesh)

ds = ufl.Measure("ds", domain=mesh, subdomain_data=ft, subdomain_id=tag)
# Row 0 integrates u.n; row 1 integrates -u.n.
a = ufl.inner(ufl.dot(u, n), z0)*ds + ufl.inner(-ufl.dot(u, n), z1)*ds

# entity_maps as a list is the v0.10-style API; v0.9 expects a dict.
a_compiled = dolfinx.fem.form(a, entity_maps=[entity_map])
A = dolfinx.fem.petsc.assemble_matrix(a_compiled)
A.assemble()
print(A[:,:])
print(A.norm(0), A.norm(2))

Thanks! Sorry for all the hand-holding through what might seem like silly questions… :confused: Anyway, running the code modified for complex forms, I get an error on the entity map in the dolfinx.fem.form statement:

Traceback (most recent call last):
  File "/home/bill/Cluster/Fenics2020/WaveguideBC/RealSpace.py", line 28, in <module>
    a_compiled = dolfinx.fem.form(a, entity_maps=[entity_map])
                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/bill/spack/var/spack/environments/Fenics-090-complex/.spack-env/view/lib/python3.12/site-packages/dolfinx/fem/forms.py", line 337, in form
    return _create_form(form)
           ^^^^^^^^^^^^^^^^^^
  File "/home/bill/spack/var/spack/environments/Fenics-090-complex/.spack-env/view/lib/python3.12/site-packages/dolfinx/fem/forms.py", line 331, in _create_form
    return _form(form)
           ^^^^^^^^^^^
  File "/home/bill/spack/var/spack/environments/Fenics-090-complex/.spack-env/view/lib/python3.12/site-packages/dolfinx/fem/forms.py", line 304, in _form
    _entity_maps = {msh._cpp_object: emap for (msh, emap) in entity_maps.items()}
                                                             ^^^^^^^^^^^^^^^^^
AttributeError: 'list' object has no attribute 'items'

Could this be because I am using a different version of DOLFINx (0.9.0)? I printed out the contents of entity_map, and there are elements in the list.

If you use 0.9, you should change the entity_maps input to

# DOLFINx v0.9 entity-map format: a dict {submesh: parent->sub index map}.
# Build the inverse map from parent facet index to submesh facet index,
# with -1 marking parent facets that are not in the submesh.
num_facets_parent = mesh.topology.index_map(mesh.topology.dim-1).size_local + mesh.topology.index_map(mesh.topology.dim-1).num_ghosts
inverse_map = np.full(num_facets_parent, -1, dtype=np.int32)
inverse_map[exterior_facets] = np.arange(len(exterior_facets), dtype=np.int32)
a_compiled = dolfinx.fem.form(a, entity_maps={Gamma: inverse_map})

As you observe, v0.10 has made the usage of entity maps way simpler.

1 Like

That’s the trick! It works!
It’s good to know that version 0.10.0 simplifies some of this stuff. Changed the mesh to “unit cube” and it works fine as well.

At this point, I need to figure out how to use this formalism to generate something like
\int \mathbf{v} \cdot \mathbf{f}(x,y) \, dS, where \mathbf{f} is a user-supplied function. Would it make sense to define something like:

a = ufl.inner(ufl.dot(ufl.cross(u,uh), n), z0) * ds + ufl.inner(-ufl.dot(ufl.cross(u, uh), n), z1) * ds

to do this, where uh is a user defined vector function interpolated on V? (I tried it in the code you sent and it ran without a problem, but I need to verify that the result makes sense.)

Anyway, thanks heaps for the guidance! Good evening!