Large compression in contact mechanics

Dear all,

I would like to implement a large uniaxial compression (of 50%) of a sphere.
I am currently using a contact penalty of the form:

    # The current vertical position of the mesh points on the arc
    # (reference z plus the vertical displacement component).
    z_curr = x_c[1] + u[1]
    
    # Penetration: positive when the mesh z is higher than the plate z
    gap = z_curr-z_plate 
    penetration = ufl.max_value(0, gap)
    
    ds = ufl.Measure("ds", domain=domain, subdomain_data=facet_tags)
    
    # Penalty Energy, integrated over the tagged contact boundary ds(1)
    # with the axisymmetric surface weight 2*pi*r
    Penalty_energy = penalty_parameter * penetration**2 *2*np.pi*r * ds(1)
    # The derivatives will automatically include this constant:
    # first derivative = contact residual, second derivative = tangent.
    Residual_penalty = ufl.derivative(Penalty_energy, u, u_) 
    penalty_tangent_form = ufl.derivative(Residual_penalty, u, v) 

This approach is motivated by the Hertz contact (see the code in this question, and this tutorial).

I am just showing a bit of the code as it seems to me a more general question.

I tried multiple values for the penalty_parameter, without success.
The problem is that beyond 25% compression the results are unexpected, as you can see in the following images. Tell me if you think I should just paste the full code.

25%:

30%:

I believe it is due to the simple quadratic penalty, and that a more complex model should be implemented for large compressions.
What do you think about it?

PS: In order to talk about a minimal code, here is the code corresponding to the Hertz contact in the tutorial. I believe the same kind of problem is arising there:

import numpy as np
import ufl
from dolfinx import mesh, fem, default_scalar_type
from dolfinx.fem.petsc import NonlinearProblem
from mpi4py import MPI
from ufl import tr, sym, grad, Identity, inner
from dolfinx import plot
import pyvista

# --- 1. MESH & MAPPING ---
# Resolution and physical extent of the axisymmetric (r, z) section:
# the rectangle [0, R_domain] x [-H, 0] is the half cross-section.
N_r, N_z = 160, 160
R_domain, H = 1.0, 1.0

# Create simple rectangle
domain = mesh.create_rectangle(MPI.COMM_WORLD, [[0.0, -H], [R_domain, 0.0]], [N_r, N_z], cell_type=mesh.CellType.triangle)
def map_coordinates(x, r_max=None, height=None):
    """Grade the rectangle's vertices via a quadratic blend.

    Each normalized coordinate t in [0, 1] is mapped through
    0.2*t + 0.8*t**2, whose derivative 0.2 + 1.6*t is strictly
    positive, so the Jacobian of the mapping never vanishes.
    The endpoints 0 and 1 are preserved, so the boundaries stay at
    r = 0, r = r_max, z = 0 and z = -height.

    Parameters
    ----------
    x : ndarray of shape (num_points, >=2)
        Reference coordinates; column 0 is r in [0, r_max], column 1
        is z in [-height, 0]. Extra columns (e.g. the unused third
        coordinate of domain.geometry.x) are returned as zeros.
    r_max, height : float, optional
        Domain radius and depth; default to the module-level
        R_domain and H, preserving the original call signature.

    Returns
    -------
    ndarray
        New array of the same shape with the graded coordinates.
    """
    if r_max is None:
        r_max = R_domain
    if height is None:
        height = H
    y = np.zeros_like(x)
    t_r = x[:, 0] / r_max
    t_z = x[:, 1] / height
    y[:, 0] = r_max * (0.2 * t_r + 0.8 * t_r**2)
    y[:, 1] = -height * (-0.2 * t_z + 0.8 * t_z**2)
    return y
# Deform the reference rectangle in place using the quadratic grading,
# clustering nodes toward the outer radius and the bottom.
domain.geometry.x[:] = map_coordinates(domain.geometry.x)

# --- 2. FACET TAGGING ---
f_dim = domain.topology.dim - 1
# Boundary predicates; the grading above preserves the endpoints, so the
# top stays at z = 0, the bottom at z = -H and the symmetry axis at r = 0.
def top(x): return np.isclose(x[1], 0.0)
def bottom(x): return np.isclose(x[1], -H)
def axis(x): return np.isclose(x[0], 0.0)

# Locate and tag
top_facets = mesh.locate_entities_boundary(domain, f_dim, top)
bottom_facets = mesh.locate_entities_boundary(domain, f_dim, bottom)
axis_facets = mesh.locate_entities_boundary(domain, f_dim, axis)

# Combine into meshtags (avoiding duplicates)
# Tag convention: 1 = top (contact surface), 2 = bottom, 3 = axis.
indices = np.hstack([top_facets, bottom_facets, axis_facets])
values = np.hstack([np.full(len(top_facets), 1, np.int32),np.full(len(bottom_facets), 2, np.int32),np.full(len(axis_facets), 3, np.int32)])
# Sort indices for meshtags (meshtags expects ascending facet indices)
sorted_idx = np.argsort(indices)
facet_tags = mesh.meshtags(domain, f_dim, indices[sorted_idx], values[sorted_idx])
#print(f'indices={indices}, \n values={values}, \n sorted_idx={sorted_idx}, \n list of the facet tags={np.unique(facet_tags.values)}')
# Tagged boundary measure: e.g. ds(1) integrates over the top surface only.
ds = ufl.Measure("ds", domain=domain, subdomain_data=facet_tags)


fdim = domain.topology.dim - 1


# --- 3. SPACES & BCs ---
# Vector-valued P1 Lagrange space for the displacement (u_r, u_z).
V = fem.functionspace(domain, ("CG", 1, (2,))) #continuous Galerkin (Lagrange)
u = fem.Function(V)        # current displacement iterate (the unknown)
du = ufl.TrialFunction(V)  # direction for the Jacobian
u_ = ufl.TestFunction(V)   # test function for the residual
facets_axis = mesh.locate_entities_boundary(domain, fdim, axis)
facets_bottom = mesh.locate_entities_boundary(domain, fdim, bottom)
# Locate dofs (IMPORTANT tuple syntax)
dofs_axis = fem.locate_dofs_topological(V.sub(0), fdim, facets_axis)
dofs_bottom = fem.locate_dofs_topological(V.sub(1), fdim, facets_bottom)
# Ur = 0 on axis (ID 3)
bc_axis = fem.dirichletbc(fem.Constant(domain, default_scalar_type(0.0)), dofs_axis, V.sub(0))
# Uz = 0 on bottom (ID 2)
bc_bottom = fem.dirichletbc(fem.Constant(domain,default_scalar_type(0.0)), dofs_bottom, V.sub(1))
bcs = [bc_axis, bc_bottom]
# --- 4. AXISYMMETRIC PHYSICS ---
# Linear elastic material: Young's modulus E and Poisson's ratio nu,
# converted to the Lame constants mu and lambda.
E, nu = 10.0, 0.3
mu = fem.Constant(domain, E / (2*(1+nu)))
lmbda = fem.Constant(domain, E*nu / ((1+nu)*(1-2*nu)))
# --- Mesh Size Calculation ---
# 1. Get the topological dimension 
tdim = domain.topology.dim
# 2. Get the indices of all local cells
num_cells = domain.topology.index_map(tdim).size_local
cells = np.arange(num_cells, dtype=np.int32)
# 3. Call the .h() method on the OBJECT (domain)
h_values = domain.h(tdim, cells)
# 4. Now compute the average
avg_h = np.mean(h_values)
# Scaled Penalty: E / avg_h ensures numerical stability
# (stiffness-over-mesh-size heuristic with a large prefactor of 1e3).
pen_float = 1e3 * float(E) / avg_h
pen = fem.Constant(domain, pen_float)
print('penalty=',pen_float)

R_ind = 0.5  # indenter (sphere) radius
x_c = ufl.SpatialCoordinate(domain)
# Numerical safety for r=0: clamp the radius used in the hoop strain so
# the 1/r term stays finite on the symmetry axis.
r = x_c[0]
r_safe = ufl.conditional(r > 1e-10, r, 1e-10)
d_ind = fem.Constant(domain, 0.05)  # indentation depth; updated in the load loop
# Parabolic (Hertzian) approximation of the spherical indenter surface:
# z = -d_ind + r^2 / (2 R_ind).
obstacle = -d_ind + (r**2)/(2*R_ind)
# Axisymmetric small-strain kinematics and linear elastic stresses:
# in-plane strain, hoop strain u_r/r, volumetric strain, and the
# corresponding in-plane and hoop stress components.
def eps_axi(v): return sym(grad(v))
def eps_theta(v): return v[0] / r_safe
def div_axi(v): return tr(eps_axi(v)) + eps_theta(v)
def sigma_axi(v): return lmbda * div_axi(v) * Identity(2) + 2 * mu * eps_axi(v)
def sigma_theta(v): return lmbda * div_axi(v) + 2 * mu * eps_theta(v)

# --- 5. VARIATIONAL FORM ---
#Contact
#eps = fem.Constant(domain, 1e-3)  # smoothing parameter
# Signed gap between the displaced top surface (u_z) and the indenter
# surface; positive values mean penetration.
gap = -obstacle + u[1]
penetration = ufl.max_value(0, gap) #0.5 * gap * (1.0 + ufl.tanh((gap-eps) / eps))
#Energy
# Axisymmetric volume/surface measures carry a factor 2*pi*r; the constant
# 2 (and the elastic 1/2) are dropped from BOTH terms, which rescales the
# whole functional uniformly and leaves the stationarity conditions intact.
Pi_int = (inner(sigma_axi(u), eps_axi(u)) + sigma_theta(u) * eps_theta(u)) * np.pi * r * ufl.dx
Pi_con = pen * penetration**2 * np.pi*r * ds(1)
Pi = Pi_int + Pi_con

# Residual and Jacobian of the total potential, derived symbolically.
F = ufl.derivative(Pi, u, u_)
J = ufl.derivative(F, u, du)

# --- 1. Update the Constant ---
# Load stepping: ramp the indentation depth to d_target in n_steps equal
# increments (the trivial zero step is dropped by the [1:] slice).
d_target = 0.5
n_steps = 10
d_increments = np.linspace(0, d_target, n_steps + 1)[1:] 
#d_increments = [0]
# --- 2. Robust Solver Options ---
# Direct LU (MUMPS) linear solves inside a Newton method with a
# backtracking line search.
petsc_opts = {
    "ksp_type": "preonly",
    "pc_type": "lu",
    "pc_factor_mat_solver_type": "mumps",
    "snes_type": "newtonls",
    "snes_linesearch_type": "bt", 
    "snes_max_it": 100,
    "snes_atol": 1e-8,
    "snes_rtol": 1e-8,
}

problem = NonlinearProblem(F, u, bcs=bcs, J=J, petsc_options_prefix="hertzcontact_", petsc_options=petsc_opts)

print(f"Starting indentation loop (Total target: {d_target})...")

# Incremental loading: each converged solution is the initial guess for
# the next depth, keeping Newton's method inside its convergence basin.
for i, d_val in enumerate(d_increments):
    # Update the indentation depth constant
    d_ind.value = d_val
    print(f"\nStep {i+1}/{n_steps}: Indentation = {d_val:.4f}")
    
    try:
        n_iters, converged = problem.solve()
        if converged:
            print(f"  Converged in {n_iters} iterations.")
        else:
            print(f"  Warning: Step {i+1} did not converge!")
            break # Stop if we lose convergence
    except Exception as e:
        # Deliberately broad: report any solver failure (e.g. a PETSc
        # error) and stop the ramp instead of aborting the script.
        print(f"  Critical Failure: {e}")
        break

# --- 3. Visualization ---
# Guard against a diverged solve before handing data to pyvista.
if not np.all(np.isfinite(u.x.array)):
    print("Error: NaNs detected.")
else:
    # 1. Create Mesh
    topology, cell_types, geometry = plot.vtk_mesh(V)
    grid = pyvista.UnstructuredGrid(topology, cell_types, geometry)
    # 2. Attach the displacement data (padded to 3 components for VTK)
    values = np.zeros((geometry.shape[0], 3))
    values[:, :domain.geometry.dim] = u.x.array.reshape(geometry.shape[0], domain.geometry.dim)
    grid.point_data["Displacement"] = values
    # 3. Warp the mesh by the displacement (scaled for visibility)
    warped = grid.warp_by_vector("Displacement", factor=1.0)
    # 4. Plotting
    plotter = pyvista.Plotter()
    plotter.add_mesh(warped, show_edges=True, scalars="Displacement", component=1) # Plot U_z
    plotter.show_axes()
    plotter.view_xy()
    plotter.show()

Hello,

there is an implementation using an obstacle-problem formulation, solved via LVPP. 50% compression is really far outside the bounds of the linear elasticity assumptions, though.

Not sure if the weird little bulges are related to the linear elasticity assumptions.

Your formulation may not be ideal for such a large load. Maybe try a penalty approach for penetration where you solve repeatedly with increasing values of the penalty factor.

1 Like