Hello,
I am on DOLFINx version: 0.10.0.post2 based on GIT commit: 601b058a51f6a8f3cea11811048283f00855e18d
Porting from 0.9 to 0.10 was successful except for my custom SNES solver, and there I am out of ideas. I use a custom SNES solver because my residual and Jacobian forms are assembled block-wise, i.e.
self.U_global = ufl.MixedFunctionSpace(V_p1, V_v1, V_p2, V_v2) # Proper function spaces
self.U = [V_p1, V_v1, V_p2, V_v2]
p1 = fem.Function(self.U[0])
v1 = fem.Function(self.U[1])
p2 = fem.Function(self.U[2])
v2 = fem.Function(self.U[3])
q1, u1, q2, u2 = ufl.TestFunctions(self.U_global)
res = ufl.inner(v1, u1) * dx(1)
res += ufl.inner(ufl.dot(v1, ufl.grad(v1)), u1) * dx(1)
res += ufl.inner(ufl.grad(v2), ufl.grad(u2)) * dx(2)
# ...
I could not get this set-up to work with the new fem.petsc.NonlinearProblem, which seems to work only with MixedElement-based spaces (which I do not use).
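For context, the custom solver below consumes the residual and the Jacobian in blocked form rather than as a single form over a MixedElement. Schematically (my notation, not actual code; the extraction itself happens in solve_nonlinear further down via ufl.extract_blocks):

# F = [F_p1, F_v1, F_p2, F_v2]            # list of forms, one per test space
# J = [[J_00, J_01, J_02, J_03],
#      ...,
#      [J_30, J_31, J_32, J_33]]          # 4 x 4 nested list of forms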
Anyway, here is the code of my custom SNES solver:
import typing
import numpy as np
import ufl
import dolfinx
import dolfinx.fem.petsc  # dolfinx.fem.petsc has to be imported explicitly
import petsc4py
from mpi4py import MPI
from petsc4py import PETSc
# Define the Navier-Stokes problem class
class NavierStokesProblem:
    """Defines a nonlinear problem for SNES solver"""

    def __init__(
        self, F: list[dolfinx.fem.Form], J: list[list[dolfinx.fem.Form]],
        solutions: list[dolfinx.fem.Function],
        bcs: list[dolfinx.fem.DirichletBC],
        P: typing.Optional[list[list[ufl.Form]]] = None
    ) -> None:
        self._F = F
        self._J = J
        self._solutions = solutions
        self._bcs = bcs
        self._P = P
        # self._obj_vec = dolfinx.fem.petsc.create_vector_block(F)  # v0.9
        self._obj_vec = dolfinx.fem.petsc.create_vector(
            dolfinx.fem.extract_function_spaces(F), kind=PETSc.Vec.Type.MPI
        )  # v0.10

    def update_solutions(self, x: petsc4py.PETSc.Vec) -> None:
        """Update `self._solutions` with the data from `x`"""
        maps = [
            (si.function_space.dofmap.index_map, si.function_space.dofmap.index_map_bs)
            for si in self._solutions
        ]
        local_x = dolfinx.cpp.la.petsc.get_local_vectors(x, maps)
        for lx, s in zip(local_x, self._solutions):
            s.x.array[:] = lx
            s.x.scatter_forward()

    def obj(self, snes: petsc4py.PETSc.SNES, x: petsc4py.PETSc.Vec) -> np.float64:
        """Compute the norm of the residual"""
        self.F(snes, x, self._obj_vec)
        return self._obj_vec.norm()

    def F(self, snes: petsc4py.PETSc.SNES, x: petsc4py.PETSc.Vec, F_vec: petsc4py.PETSc.Vec) -> None:
        """Assemble the residual"""
        self.update_solutions(x)
        with F_vec.localForm() as F_vec_local:
            F_vec_local.set(0.0)
        # dolfinx.fem.petsc.assemble_vector_block(F_vec, self._F, self._J, self._bcs, x0=x, alpha=-1.0)  # v0.9
        # From here v0.10
        dolfinx.fem.petsc.assemble_vector(F_vec, self._F)
        bcs1 = dolfinx.fem.bcs_by_block(
            dolfinx.fem.extract_function_spaces(self._J, 1),
            self._bcs,
        )
        dolfinx.fem.petsc.apply_lifting(F_vec, self._J, bcs=bcs1, x0=x, alpha=-1)
        F_vec.ghostUpdate(
            addv=PETSc.InsertMode.ADD,
            mode=PETSc.ScatterMode.REVERSE,
        )
        bcs0 = dolfinx.fem.bcs_by_block(
            dolfinx.fem.extract_function_spaces(self._F),
            self._bcs,
        )
        dolfinx.fem.petsc.set_bc(F_vec, bcs0)
        # F_vec.ghostUpdate(PETSc.InsertMode.INSERT_VALUES, PETSc.ScatterMode.FORWARD)  # v0.9
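        # Note: in v0.9, assemble_vector_block performed the lifting, the BC
        # insertion and the ghost updates internally; the calls above are my
        # manual replication of those steps for v0.10. I am not sure whether the
        # forward ghost update commented out above (the v0.9 line) is still
        # needed after set_bc here.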

    def J(self, snes: petsc4py.PETSc.SNES, x: petsc4py.PETSc.Vec, J_mat: petsc4py.PETSc.Mat, P_mat: petsc4py.PETSc.Mat) -> None:
        """Assemble the Jacobian matrix"""
        self.update_solutions(x)
        J_mat.zeroEntries()
        # dolfinx.fem.petsc.assemble_matrix_block(J_mat, self._J, self._bcs)  # v0.9
        dolfinx.fem.petsc.assemble_matrix(J_mat, self._J, self._bcs, diag=1.0)  # v0.10
        J_mat.assemble()
        if self._P is not None:
            P_mat.zeroEntries()
            dolfinx.fem.petsc.assemble_matrix(P_mat, self._P, self._bcs, diag=1.0)
            P_mat.assemble()

def solve_nonlinear(
    sols: list[dolfinx.fem.Function],
    trials,
    res: ufl.Form,
    bc: list[dolfinx.fem.DirichletBC],
    entitymaps,
    tau_abs: float = 1e-3,
    tau_rel: float = 0.05
) -> dict:
    jac = (
        ufl.derivative(res, sols[0], trials[0])
        + ufl.derivative(res, sols[1], trials[1])
        + ufl.derivative(res, sols[2], trials[2])
        + ufl.derivative(res, sols[3], trials[3])
    )
    F = dolfinx.fem.form(ufl.extract_blocks(res), entity_maps=entitymaps)
    J = dolfinx.fem.form(ufl.extract_blocks(jac), entity_maps=entitymaps)
    # Create the problem instance
    problem = NavierStokesProblem(F, J, sols, bc)
    # v0.10
    F_vec = dolfinx.fem.petsc.create_vector(
        dolfinx.fem.extract_function_spaces(problem._F), kind=PETSc.Vec.Type.MPI
    )
    J_mat = dolfinx.fem.petsc.create_matrix(problem._J)
    sol = dolfinx.fem.petsc.create_vector(
        dolfinx.fem.extract_function_spaces(problem._F), kind=PETSc.Vec.Type.MPI
    )
    sol.set(0.0)  # Initialize the solution vector (DIRTY HACK TODO!)
    # Compute local offsets for block vector (use size_local, not size_global)
    maps = [
        (si.function_space.dofmap.index_map, si.function_space.dofmap.index_map_bs)
        for si in sols
    ]
    local_offset_p1 = 0
    local_offset_v1 = maps[0][0].size_local * maps[0][1]
    local_offset_p2 = local_offset_v1 + maps[1][0].size_local * maps[1][1]
    local_offset_v2 = local_offset_p2 + maps[2][0].size_local * maps[2][1]
    # Initialize solution vector from current solutions
    sol.array[local_offset_p1:local_offset_v1] = sols[0].x.array[:local_offset_v1]
    sol.array[local_offset_v1:local_offset_p2] = sols[1].x.array[:local_offset_p2 - local_offset_v1]
    sol.array[local_offset_p2:local_offset_v2] = sols[2].x.array[:local_offset_v2 - local_offset_p2]
    sol.array[local_offset_v2:] = sols[3].x.array[:len(sol.array) - local_offset_v2]
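    # Note: the manual offset copy above is my workaround for filling the block
    # vector from the current solutions. A possibly cleaner alternative
    # (untested; it assumes the counterpart of the get_local_vectors call used
    # in update_solutions is still available in v0.10) would be:
    #     dolfinx.cpp.la.petsc.scatter_local_vectors(
    #         sol, [s.x.array for s in sols], maps)
    #     sol.ghostUpdate(addv=PETSc.InsertMode.INSERT_VALUES,
    #                     mode=PETSc.ScatterMode.FORWARD)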
    # Define a function to monitor iteration residuals
    def monitor_residual(snes, it, norm):
        if MPI.COMM_WORLD.rank == 0:
            print(it, norm)

    # Create SNES solver
    snes = petsc4py.PETSc.SNES().create(MPI.COMM_WORLD)
    snes.setTolerances(max_it=50, atol=1e-8)  # Increased max iterations for strict convergence
    snes.getKSP().setType("preonly")
    snes.getKSP().getPC().setType("lu")
    snes.getKSP().getPC().setFactorSolverType("mumps")
    opts = PETSc.Options()  # type: ignore
    # opts["mat_mumps_icntl_14"] = 80  # Increase MUMPS working memory
    opts["mat_mumps_icntl_24"] = 1  # Option to support solving a singular matrix (pressure nullspace)
    opts["mat_mumps_icntl_25"] = 0  # Option to support solving a singular matrix (pressure nullspace)
    opts["mat_mumps_cntl_3"] = 1e-6  # Threshold for null pivot detection
    opts["ksp_error_if_not_converged"] = 1
    snes.getKSP().setFromOptions()
    snes.setObjective(problem.obj)
    snes.setFunction(problem.F, F_vec)
    snes.setJacobian(problem.J, J=J_mat, P=None)
    snes.setMonitor(monitor_residual)
    snes.setConvergenceHistory()
    snes.solve(None, sol)
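For completeness, this is roughly how I call solve_nonlinear (a sketch from within the same class as the first snippet; bcs and entity_maps stand for my Dirichlet boundary conditions and entity maps, which are not shown here):

trials = list(ufl.TrialFunctions(self.U_global))  # one trial function per sub-space
solve_nonlinear(
    sols=[p1, v1, p2, v2],
    trials=trials,
    res=res,
    bc=bcs,
    entitymaps=entity_maps,
)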
I changed the block-assembly routines to the v0.10 API as described in the release notes, but for some reason I get the PETSc error:
New nonzero at (44,44) caused a malloc. Use MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE) to turn off this check
when calling apply_lifting() (though not on the first call).
The residual and the Jacobian matrix are identical between the v0.9 and v0.10 versions until the function F is called for the second time, which is when the error occurs. Essentially, SNES prints a single residual (the same for 0.9 and 0.10), and then I run into the error.
As I said before, the v0.9 version worked, so I assume something that was done implicitly now has to be performed manually, such as apply_lifting and the boundary-condition handling. Can someone spot the error? The v0.9 calls are left in as comments.
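For reference, I believe the petsc4py equivalent of the MatSetOption workaround suggested in the error message would be something like the line below (applied to the Jacobian matrix); I have not used it, since it would only silence the allocation check rather than explain where the new nonzero comes from:

J_mat.setOption(PETSc.Mat.Option.NEW_NONZERO_ALLOCATION_ERR, False)  # diagnostic only, not a fix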
If a minimal example is required, I can try to put one together, but it will most likely be a rather large "minimal" example.
Thank you very much!