Hello,
I have issues validating the gradient (with a Taylor-test) of some operations based on a simple custom function I implemented. The convergence rates are sometimes correct depending on the operation I am performing before the custom function is called…
Here is first the Custom Function contained in the file: map_dofs.py
, which rearranges the degrees of freedom of an input function (according to a specific “map”) to an output function of the same FunctionSpace:
from fenics import *
from fenics_adjoint import *
def map_dofs(func0,dof_map_1in0):
func1 = Function(func0.function_space())
func1.vector()[:] = func0.vector()[dof_map_1in0]
return func1
and the overloaded version containing the corresponding adjoint derivation (inside file: map_dofs_overloaded.py
):
from fenics import *
from fenics_adjoint import *
from pyadjoint import Block
from pyadjoint.overloaded_function import overload_function
from map_dofs import map_dofs
import numpy as np
backend_map_dofs = map_dofs
class MapDofsBlock(Block):
def __init__(self, func0, dof_map_1in0, **kwargs):
super(MapDofsBlock, self).__init__()
self.kwargs = kwargs
self.add_dependency(func0)
self.dof_map_1in0 = dof_map_1in0 # Store the map
def __str__(self):
return 'MapDofsBlock'
def evaluate_adj_component(self, inputs, adj_inputs, block_variable, idx, prepared=None):
adj_input = adj_inputs[0]
x = inputs[0].vector()
output = adj_input
output[:] = output[self.dof_map_1in0]
return output
def recompute_component(self, inputs, block_variable, idx, prepared):
return backend_map_dofs(inputs[0],self.dof_map_1in0)
map_dofs = overload_function(map_dofs, MapDofsBlock)
Here is a minimal working example:
from dolfin import *
from dolfin_adjoint import *
import numpy as np
from map_dofs_overloaded import map_dofs
mesh = UnitSquareMesh(4,4)
V = FunctionSpace(mesh,'CG', 2)
u_0 = interpolate(Expression("x[0]",degree=3), V)
n = V.dim() # Number of dof
list_idx_dof = [x for x in range(n)]
np.random.shuffle(list_idx_dof) # Shuffle randomly the list of indexes for a general example
dof_map_1in0 = list_idx_dof
power_value = 1 # To change to = 3 for error
u_0_pen = project(u_0**power_value,V)
u_1 = map_dofs(u_0_pen,dof_map_1in0) # Use a custom function
m0 = Control(u_0)
J = assemble(((u_1)**2) *dx)**(1/2) # Arbitrary Functional to serve as objective function
Jhat = ReducedFunctional(J,m0)
# Taylor remainder test to verify the computation of the gradient
h = Function(V)
h.vector()[:] = 0.01*(1.0- 0.0)
conv_rate = taylor_test(Jhat,u_0,h)
which runs as expected and gives:
Running Taylor test
Computed residuals: [1.513063333844866e-09, 3.7829600191182635e-10, 9.457792696186402e-11, 2.3644987814699903e-11]
Computed convergence rates: [1.9998849429826506, 1.9999401040323632, 1.9999691216334434]
However if the power_value
is superior to 1, like power_value = 3
, the Taylor Test fails with convergence rates of ~1.0 instead of ~2.0:
Running Taylor test
Computed residuals: [6.020749139947801e-05, 3.009948012942537e-05, 1.5048673712567212e-05, 7.524070273277936e-06]
Computed convergence rates: [1.0002044381198376, 1.000102226048465, 1.0000511147760685]
Do you have some ideas regarding these wrong convergence rates?
Thank you for your time