Newton method implemented manually does not converge

Hello,

I have written code to solve a large-deformation elasticity problem. I apply a DirichletBC on the top face while the bottom is fixed along the Y direction. I am implementing Newton's method manually to solve it, but it is not converging. Here is a minimal code:

from __future__ import print_function
from dolfin import *
from ufl import indices
import numpy as np

# Parameters
size = 1.0
mu = 41.0
lamb = 87.2
T_ = 1.0            # final time
num_steps = 2       # number of time steps
dt = T_ / num_steps # time step size


# Create mesh and define function space
mesh = BoxMesh(Point(0, 0, 0), Point(size, size, size), 5, 5, 5)

V = VectorFunctionSpace(mesh, 'P', 1)

# Boundary conditions
v_top = 1e-3
tol = 1e-8  # geometric tolerance for locating the top/bottom boundaries

u_top = Expression('v_top*t',
                   degree=1, v_top=v_top, t=0.0)

def boundary_bot(x, on_boundary):
    return on_boundary and near(x[1], 0.0, tol)

def boundary_top(x, on_boundary):
    return on_boundary and near(x[1], size, tol)

bc_bottom = DirichletBC(V.sub(1), Constant(0.0), boundary_bot)

bc_top = DirichletBC(V.sub(1), u_top, boundary_top)

bcs = [bc_bottom, bc_top]

# Define variational problem
u = Function(V)
u_ = TestFunction(V)
du = Function(V, name="Iteration correction")
Du = Function(V, name="Current increment")
d = u.geometric_dimension()  # space dimension
v = TrialFunction(V)
f = Constant((0, 0, 0))
T = Constant((0, 0, 0))
I = Identity(d)                 # Identity tensor

def C_int(d):
    # Isotropic elasticity tensor C_ijkl = lamb*d_ij*d_kl + mu*(d_ik*d_jl + d_il*d_jk)
    I = np.eye(d)
    II = np.einsum("ik,jl->ijkl", I, I) + np.einsum("il,jk->ijkl", I, I)
    JJ = np.einsum("ij,kl->ijkl", I, I)
    return as_tensor(mu*II + lamb*JJ)

def get_F(u):
    F = I + grad(u)  # deformation gradient
    return F

def get_S(F):
    E = 0.5*(F.T*F - I)  # Green-Lagrange strain tensor
    i, j, k, l = indices(4)
    S = as_tensor(C_int(d)[i, j, k, l]*E[k, l], (i, j))  # second Piola-Kirchhoff stress
    return S

def get_P(u):
    F = get_F(u)
    S = get_S(F)
    P = F*S  # first Piola-Kirchhoff stress
    return P

# Residual and its Jacobian for the Newton linearization
res = inner(nabla_grad(u_), get_P(Du))*dx - dot(T, u_)*ds
Jac = derivative(res, Du, v)

# Time-stepping
t = 0.0
Nitermax = 10

for n in range(num_steps):
    t += dt
    print('t=',round(t,3),'T=',T_)
    u_top.t = t 
    
    Du.interpolate(Constant((0, 0, 0)))
    niter = 0

    while niter < Nitermax:
        A, Res = assemble_system(Jac, -res, bcs)
        solve(A, du.vector(), Res)
        Du.assign(Du + du)

        a = assemble(res)
        for bc in bcs:
            bc.apply(a)

        nRes = a.norm("l2")
        print("    Residual:", nRes)
        niter += 1
    
    u.assign(u + Du)

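To spell out the scheme: each pass of the while loop is meant to solve the linearized system J(Du) du = -R(Du) for the correction du (R being the residual form res and J its Jacobian Jac), and then accumulate Du <- Du + du.
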
Ideally, the norm of the residual should decrease rapidly inside the while loop (quadratically, once Newton is close to the solution). However, it barely changes at all. Here is the output:

t= 0.5 T= 1.0
    Residual: 0.003000596335349779
    Residual: 0.0030000046508762163
    Residual: 0.0030000046197302807
    Residual: 0.003000004621295162
    Residual: 0.003000004622873061
    Residual: 0.003000004624452816
    Residual: 0.003000004626034452
    Residual: 0.0030000046276179465
    Residual: 0.003000004629203315
    Residual: 0.0030000046307905636
t= 1.0 T= 1.0
    Residual: 0.0060047676568817715
    Residual: 0.006000037496833246
    Residual: 0.006000036995869351
    Residual: 0.0060000370208320025
    Residual: 0.0060000370462124746
    Residual: 0.006000037071653371
    Residual: 0.006000037097154581
    Residual: 0.006000037122716251
    Residual: 0.006000037148338622
    Residual: 0.00600003717402185

Can anyone please let me know what is wrong here?
Thanks.

The problem comes from applying the non-homogeneous Dirichlet BCs to every iteration's correction; see also here.
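
For future readers, here is a minimal sketch of that fix (reusing the names from the code above): apply the non-homogeneous BCs only when computing the first correction of each time step, and use homogenized (zero-valued) copies of the same BCs for every later correction and for the residual check, since once Du carries the prescribed boundary values the corrections du must vanish on the Dirichlet boundary.

# Homogenized copies of the BCs: same boundary, zero value
bcs_hom = [DirichletBC(bc) for bc in bcs]
for bc in bcs_hom:
    bc.homogenize()

for n in range(num_steps):
    t += dt
    u_top.t = t

    Du.interpolate(Constant((0, 0, 0)))
    niter = 0
    while niter < Nitermax:
        # non-homogeneous BCs only for the first correction of the step
        A, Res = assemble_system(Jac, -res, bcs if niter == 0 else bcs_hom)
        solve(A, du.vector(), Res)
        Du.assign(Du + du)

        a = assemble(res)
        for bc in bcs_hom:  # zero out the Dirichlet rows of the residual
            bc.apply(a)
        nRes = a.norm("l2")
        print("    Residual:", nRes)
        niter += 1

    u.assign(u + Du)

With this change the residual check is no longer polluted by the prescribed boundary values, and the norm should now decrease quadratically within each time step.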

Thanks a lot, that indeed solves the problem.