Hello dolfinx users, I have a problem that requires me to compute eigenvalues, so the dolfinx wrappers and petsc4py are not enough; I need to leverage slepc4py.
Physically, I want to do resolvent analysis at frequency 0 on a simple shear flow. This requires linearising Navier–Stokes, but also using an extensor matrix and manually setting boundary conditions outside of the FEniCSx wrappers.
Mathematically, I’m solving a generalised eigenvalue problem of the form: B^H J^{-H} N J^{-1} B f = \sigma M f
B is an extensor so that Bf=\left[\begin{array}{c}f\\0\end{array}\right]
J is the Jacobian of Navier-Stokes with a trivial baseflow.
N is a norm matrix that satisfies : q^HNq=[u^H\;p^H]N\left[\begin{array}{c}u\\p\end{array}\right]=\int |u|^2 dx
Likewise, M is a norm matrix. f^HMf=\int |f|^2 dx
I’ve had mixed success - my code runs in serial and produces a nice graph :
However, the same code breaks apart in parallel :
Here’s my MWE. Apologies in advance for length, I did my best to boil it down.
import ufl
import numpy as np
import dolfinx as dfx
from dolfinx.io import XDMFFile
from petsc4py import PETSc as pet
from slepc4py import SLEPc as slp
from mpi4py.MPI import COMM_WORLD as comm
# Problem setup: unit-square mesh; Taylor-Hood (P2/P1) mixed space TH for the
# state q=(u,p), and a separate P1 vector space V for the baseflow.
mesh=dfx.UnitSquareMesh(comm,100,100)
FE_vector_1=ufl.VectorElement("Lagrange",mesh.ufl_cell(),1)
FE_vector_2=ufl.VectorElement("Lagrange",mesh.ufl_cell(),2)
FE_scalar =ufl.FiniteElement("Lagrange",mesh.ufl_cell(),1)
# Taylor Hood elements
V =dfx.FunctionSpace(mesh,FE_vector_1)  # P1 vector space: baseflow lives here
TH=dfx.FunctionSpace(mesh,FE_vector_2*FE_scalar)
# Extract velocities only: U is the collapsed (P2 vector) velocity subspace,
# used as the space of the forcing f.
U, _ = TH.sub(0).collapse(collapsed_dofs=True)
q,l=ufl.TrialFunction(TH),ufl.TestFunction(TH)
ub=dfx.Function(V)  # baseflow velocity
w,z=ufl.TrialFunction(U), ufl.TestFunction(U)  # forcing trial/test functions
u,p=ufl.split(q); v,s=ufl.split(l)  # velocity/pressure components of q and l
# Baseflow: simple shear u_x(y) = -1 + 2*y, u_y = 0.
# In serial the profile is built analytically and written to disk; in
# parallel it is read back, so a serial run must have happened first.
if comm.size==1:
    # NOTE(review): ub.vector[::2] assumes the x-components of the P1 vector
    # dofs are interleaved (x0,y0,x1,y1,...) and ordered like
    # mesh.geometry.x — that is a layout assumption, not an API guarantee;
    # confirm the dof ordering.
    x=mesh.geometry.x[:,:2]
    ub.vector[::2]=-1+2*x[:,1]
    viewer = pet.Viewer().createMPIIO("sanity_check_baseflow.dat", 'w', comm)
    ub.vector.view(viewer)
else:
    # Parallel run: load the baseflow written by a previous serial run.
    # NOTE(review): after Vec.load the ghost values of ub.vector may be
    # stale — an explicit ghostUpdate() is likely needed before this
    # function is used in assembly; verify (prime suspect for the
    # serial-vs-parallel discrepancy).
    viewer = pet.Viewer().createMPIIO("sanity_check_baseflow.dat", 'r', comm)
    ub.vector.load(viewer)
# --- Weak forms ---------------------------------------------------------
# Mass conservation (continuity): div(u) tested against s.
# (Original comment read "Mass csv" — presumably "conservation".)
J_form = ufl.inner(ufl.div(u), s)
# Momentum (different test functions and IBP)
J_form += ufl.inner(ufl.grad(ub)*u, v) # Convection
J_form += ufl.inner(ufl.grad(u)*ub, v)
J_form += ufl.inner(ufl.grad(u),ufl.grad(v))/100 # Diffusion (Re=100)
J_form -= ufl.inner(p, ufl.div( v)) # Pressure
J_form *= ufl.dx
# Velocity norm: q^H N q = int |u|^2 dx (only the velocity block is nonzero)
N_form = ufl.inner(u,v)*ufl.dx
# Forcing norm: f^H M f = int |f|^2 dx
M_form = ufl.inner(w,z)*ufl.dx
# Extensor: rectangular operator mapping a forcing f in U into the velocity
# block of TH, i.e. Bf = [f; 0]
B_form = ufl.inner(w,v)*ufl.dx
# Assembling matrices
J = dfx.fem.assemble_matrix(J_form); J.assemble() # Jacobian
N = dfx.fem.assemble_matrix(N_form); N.assemble() # Mass matrix for q
M = dfx.fem.assemble_matrix(M_form); M.assemble() # Mass matrix for f
B = dfx.fem.assemble_matrix(B_form); B.assemble() # Extensor
# Boundary conditions: homogeneous Dirichlet on all four walls, imposed
# manually by zeroing matrix rows (no dfx.DirichletBC). J keeps the
# default unit diagonal on those rows; B's rows are zeroed entirely.
def border(x): return np.isclose(x[0],0)+np.isclose(x[0],1)+np.isclose(x[1],0)+np.isclose(x[1],1)
dofs,_=dfx.fem.locate_dofs_geometrical((TH.sub(0),U),border)
# NOTE(review): locate_dofs_geometrical returns process-LOCAL indices,
# while Mat.zeroRows expects GLOBAL row indices — in parallel this very
# likely zeroes the wrong rows. Consider zeroRowsLocal; confirm. Strong
# candidate for the reported parallel breakage.
J.zeroRows(dofs); B.zeroRows(dofs,0)
# Sizes
n=B.getSize()[1]        # global number of forcing dofs (columns of B)
n_local=B.getLocalSize()[1]  # locally-owned share on this rank
def setupKSP(KSP):
    """Configure *KSP* as a pure direct solve: LU factorisation via MUMPS.

    'preonly' disables Krylov iterations so the preconditioner (here a
    full LU decomposition) is applied exactly once per solve.
    """
    KSP.setType('preonly')
    # Preconditioner: exact LU through the MUMPS parallel direct solver
    pc = KSP.getPC()
    pc.setType('lu')
    pc.setFactorSolverType('mumps')
    # Allow command-line overrides of the above defaults
    KSP.setFromOptions()
# Compute solvers instead of inverses: one direct solver for J and one for
# its Hermitian transpose (the resolvent needs both J^{-1} and J^{-H}).
KSPs = []
for Mat in [J,J.copy().hermitianTranspose()]:
    # Krylov subspace
    KSP = pet.KSP().create(comm)
    KSP.setOperators(Mat)
    setupKSP(KSP)
    KSPs.append(KSP)
# Shared work vectors with J's layout, reused by LHS_class.mult below.
# NOTE(review): this rebinds w and z, which were the UFL trial/test
# functions of U above (the forms are already built, so nothing breaks,
# but the shadowing is confusing and worth renaming).
w,z = J.getVecs()
# Resolvent operator
class LHS_class:
    """Matrix-free PETSc shell context applying y = B^H J^{-H} N J^{-1} B x.

    Relies on the module-level matrices B, N, the direct solvers KSPs
    and the shared work vectors w, z (sized like J).
    """
    # PETSc invokes mult(mat, x, y); the first parameter is the context
    # instance and would conventionally be named `self`.
    def mult(cls,A,x,y):
        B.mult(x,w)           # w = B x       (extend forcing into TH)
        KSPs[0].solve(w,z)    # z = J^{-1} w  (direct solve)
        N.mult(z,w)           # w = N z       (velocity norm weighting)
        KSPs[1].solve(w,z)    # z = J^{-H} w  (adjoint solve)
        B.multTranspose(z,y)  # y = B^T z     (restrict back to U; B is
                              # real-valued, so B^T coincides with B^H)
# Wrap the matrix-free operator as an n x n PETSc "python" (shell) matrix,
# n being the number of forcing dofs (columns of B); local row/column
# ownership mirrors B's column layout.
LHS=pet.Mat().create(comm)
LHS.setSizes([[n_local,n]]*2)
LHS.setType(pet.Mat.Type.PYTHON)
LHS.setPythonContext(LHS_class())
LHS.setUp()
# Eigensolver: generalised Hermitian problem LHS f = sigma M f, with
# LHS = B^H J^{-H} N J^{-1} B applied matrix-free (cheaper than a full SVD).
EPS = slp.EPS(); EPS.create(comm)
EPS.setOperators(LHS,M) # LHS is Hermitian by construction; M is an SPD mass matrix
EPS.setProblemType(slp.EPS.ProblemType.GHEP) # Specify that A is hermitian (by construction), & M is semi-positive
EPS.setDimensions(1,10) # nev=1 eigenpair, with up to 10 subspace vectors
EPS.setTolerances(1e-6,100) # convergence tolerance and max iterations
# Spectral transform: plain shift with sigma=0 (largest eigenvalues sought)
ST = EPS.getST(); ST.setType('shift'); ST.setShift(0)
# Krylov subspace solver used by the ST — presumably to invert the mass
# matrix M of the generalised problem; verify against SLEPc's ST docs.
KSP = ST.getKSP()
setupKSP(KSP)
EPS.setFromOptions()
EPS.solve()
# Post-processing: extract the leading forcing mode and compute its flow
# response q = J^{-1} B f, then write the velocity field to XDMF.
# NOTE(review): EPS.getConverged() is never checked — if no eigenpair
# converged, getEigenvector(0,...) will fail or return garbage. Also, in
# parallel f.vector's ghost values may need a ghostUpdate before output.
f=dfx.Function(U)
EPS.getEigenvector(0,f.vector)
q=dfx.Function(TH)
B.mult(f.vector,w)      # w = B f (reuses the shared work vector)
KSPs[0].solve(w,q.vector)  # q = J^{-1} B f
u,_=q.split()  # rebinds u (previously the UFL trial component) to the response velocity
with XDMFFile(comm, "sanity_check_response.xdmf","w") as xdmf:
    xdmf.write_mesh(mesh)
    xdmf.write_function(u)
Running the above code in the dolfinx 3.1.0 docker container, in serial and then in parallel (complex mode), should reproduce the behaviour above.
I strongly suspect I’ve forgotten to ghost a vector or messed up my information-sharing between processors.
I couldn’t reproduce the issue with a single KSP, so I’m stuck considering how petsc4py and slepc4py interact…