Here is a silly example which should only be considered for its syntax. The preconditioner I’ve designed here is deliberately not a good approximation of the Jacobian.
import matplotlib.pyplot as plt
from dolfin import *
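
# NonlinearProblem subclass that assembles the residual, the Jacobian and a
# separate preconditioner matrix together in form(); F() and J() are no-ops
# because all assembly happens there.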
class Problem(NonlinearProblem):
    def __init__(self, J, J_pc, F, bcs):
        self.bilinear_form = J
        self.preconditioner_form = J_pc
        self.linear_form = F
        self.bcs = bcs
        NonlinearProblem.__init__(self)

    def F(self, b, x):
        pass

    def J(self, A, x):
        pass

    def form(self, A, P, b, x):
        assemble(self.linear_form, tensor=b)
        assemble(self.bilinear_form, tensor=A)
        assemble(self.preconditioner_form, tensor=P)
        for bc in self.bcs:
            bc.apply(b, x)
            bc.apply(A)
            bc.apply(P)
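
# Newton solver whose linear solver receives both the Jacobian A and the
# preconditioner matrix P via set_operators(), and is configured through
# PETSc options (MINRES with a Hypre preconditioner built from P).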
class CustomSolver(NewtonSolver):
    def __init__(self):
        NewtonSolver.__init__(self, mesh.mpi_comm(),
                              PETScKrylovSolver(), PETScFactory.instance())

    def solver_setup(self, A, P, problem, iteration):
        self.linear_solver().set_operators(A, P)

        PETScOptions.set("ksp_type", "minres")
        PETScOptions.set("pc_type", "hypre")
        PETScOptions.set("ksp_view")

        self.linear_solver().set_from_options()
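
# Mesh, linear Lagrange function space and a Dirichlet condition on the
# right-hand edge of the unit square.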
mesh = UnitSquareMesh(32, 32)
V = FunctionSpace(mesh, "CG", 1)
g = Constant(1.0)
bcs = [DirichletBC(V, g, "near(x[0], 1.0) and on_boundary")]
u = Function(V)
v = TestFunction(V)
f = Expression("x[0]*sin(x[1])", degree=2)
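
# Residual of the nonlinear Poisson-type problem and its Jacobian, obtained
# by differentiating F with respect to u.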
F = inner((1 + u**2)*grad(u), grad(v))*dx - f*v*dx
J = derivative(F, u)
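
# Bilinear form used only to assemble the preconditioner operator: a plain
# Laplacian, deliberately not a good approximation of the Jacobian above.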
du = TrialFunction(V)
J_pc = dot(grad(du), grad(v))*dx
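
# Set up the problem and solve it with the custom Newton solver.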
problem = Problem(J, J_pc, F, bcs)
custom_solver = CustomSolver()
custom_solver.solve(problem, u.vector())
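
# Plot the solution and its gradient.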
plt.figure()
plot(u, title="Solution")
plt.figure()
plot(grad(u), title="Solution gradient")
plt.show()
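Since ksp_view is enabled, PETSc prints the Krylov solver and preconditioner setup at each linear solve, which is a quick way to check that the MINRES/Hypre configuration and the separate preconditioner operator are actually being picked up.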