Below, I modified the official tutorial "Computing convergence rates" to compare the exact and FEniCSx solutions of potential flow past a unit cube with constant Neumann and Dirichlet conditions. (I only substituted the original demo function with the potential flow function, which is even simpler.) The only parts I changed are `def u_ex` and `def solve_poisson`.
However, both the L2 and H1 errors increase steadily as the mesh is refined, so convergence is never achieved!
I have tried everything and have been stuck on this for three weeks. I would appreciate any help!
Original script: Error control: Computing convergence rates — FEniCSx tutorial
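To make the setup explicit (my own summary of what the code below implements): I solve the Laplace equation for the velocity potential $\phi$,

$$ \nabla^2 \phi = 0 \;\text{ in }\; \Omega, \qquad \phi = 0 \;\text{ on }\; x_0 = 0, \qquad \frac{\partial \phi}{\partial n} = U_{\text{inlet}} \;\text{ on }\; x_0 = 1, $$

with the natural zero-flux condition on all remaining (untagged) boundaries, so the exact solution is $\phi = U_{\text{inlet}}\, x_0$.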
# # Error control: Computing convergence rates
# Author: Jørgen S. Dokken, Hans Petter Langtangen, Anders Logg
# %%
from dolfinx import default_scalar_type, mesh, fem
from dolfinx.fem import (Expression, Function, functionspace,
                         assemble_scalar, dirichletbc, form,
                         locate_dofs_geometrical, locate_dofs_topological)
from dolfinx.fem.petsc import LinearProblem
from dolfinx.mesh import create_unit_square, create_unit_cube, locate_entities_boundary
from mpi4py import MPI
from ufl import SpatialCoordinate, TestFunction, TrialFunction, div, dot, dx, grad, inner
import ufl
import numpy as np
from petsc4py.PETSc import ScalarType
U_inlet = 1.0
def u_ex(mod):
    return lambda x: U_inlet * x[0]
u_numpy = u_ex(np)
u_ufl = u_ex(ufl)
def solve_poisson(N=4, degree=3):
    # domain = mesh.create_unit_cube(MPI.COMM_WORLD, N, N, N, mesh.CellType.hexahedron)
    domain = mesh.create_unit_square(MPI.COMM_WORLD, N, N)
    x = SpatialCoordinate(domain)
    V = functionspace(domain, ("Lagrange", degree))
    # Define trial and test functions
    phi = ufl.TrialFunction(V)
    v = ufl.TestFunction(V)
    # Bilinear form (Laplace equation)
    a = ufl.inner(ufl.grad(phi), ufl.grad(v)) * ufl.dx
    # f = 0.0000000000000001
    g = U_inlet
    # Topological and facet dimensions
    tdim = domain.topology.dim
    # print("domain dimension is", tdim)
    fdim = tdim - 1
    domain.topology.create_connectivity(fdim, tdim)
    # boundary_facets = mesh.exterior_facet_indices(domain.topology)

    # Linear form: Neumann BC on x = 1
    def boundary_x1(x):
        return np.isclose(x[0], 1.0)

    boundary_facets_x1 = mesh.locate_entities_boundary(domain, fdim, boundary_x1)
    mt = mesh.meshtags(domain, fdim, boundary_facets_x1, 1)
    ds = ufl.Measure("ds", domain=domain, subdomain_data=mt)
    # Facets not tagged keep the natural (zero-flux) condition
    L = ufl.inner(g, v) * ds(1)  # g = ∂φ/∂n = U_inlet = 1

    # Dirichlet BC on x = 0
    facets = mesh.locate_entities_boundary(
        domain,
        dim=fdim,
        marker=lambda x: np.isclose(x[0], 0.0),
    )
    dofs = fem.locate_dofs_topological(V=V, entity_dim=fdim, entities=facets)
    bc_dirichlet = fem.dirichletbc(value=ScalarType(0), dofs=dofs, V=V)

    # # Dirichlet BC on x = 1 (alternative I also tried)
    # def boundary_x0(x):
    #     return np.isclose(x[0], 1.0)
    # boundary_dofs_x0 = fem.locate_dofs_geometrical(V, boundary_x0)
    # bc_dirichlet = fem.dirichletbc(1.0, boundary_dofs_x0, V)

    default_problem = fem.petsc.LinearProblem(
        a, L, bcs=[bc_dirichlet],
        petsc_options={"ksp_type": "preonly", "pc_type": "lu"})
    return default_problem.solve(), u_ufl(x)
# %% [markdown]
# Now, we can compute the error between the analytical solution `u_ex=u_ufl(x)` and the approximated solution `uh`. A natural choice might seem to be computing `(u_ex-uh)**2*ufl.dx`.
# %%
uh, u_ex = solve_poisson(2)
print("uh:")
print(uh)
print("u_ex:")
print(u_ex)
comm = uh.function_space.mesh.comm
error = form((uh - u_ex)**2 * ufl.dx)
E = np.sqrt(comm.allreduce(assemble_scalar(error), MPI.SUM))
if comm.rank == 0:
    print(f"L2-error: {E:.2e}")
# %% [markdown]
# Sometimes it is of interest to compute the error of the gradient field, $\vert\vert \nabla(u_e-u_h)\vert\vert$, often referred to as the $H_0^1$-norm of the error; this can be expressed as
# %%
eh = uh - u_ex
error_H10 = form(dot(grad(eh), grad(eh)) * dx)
E_H10 = np.sqrt(comm.allreduce(assemble_scalar(error_H10), op=MPI.SUM))
if comm.rank == 0:
    print(f"H01-error: {E_H10:.2e}")
# %% [markdown]
# ### Reliable error norm computation
# However, this expression expands to `u_ex**2 + uh**2 - 2*u_ex*uh`. If the error is small (and the solution itself is of moderate size), the calculation corresponds to subtracting two nearly equal positive numbers, `u_ex**2 + uh**2`$\sim 1$ and `2*u_ex*uh`$\sim 1$, yielding a small result that is prone to round-off errors.
#
# To avoid this issue, we interpolate the approximate and exact solutions into a higher-order function space. We then subtract the degrees of freedom of the interpolated functions to create a new error function. Finally, we assemble/integrate the squared difference and take the square root to get the L2 norm.
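#
# As a tiny illustration of the cancellation (my own example, not part of the
# original demo): in double precision, the expanded form loses the small
# difference to round-off, while the direct form keeps it.
# %%
a, b = 1.0, 1.0 + 1e-9
print((a - b) ** 2)             # ~1e-18, as expected
print(a**2 + b**2 - 2 * a * b)  # wrong: swamped by round-off at the ~1e-16 level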
# %%
def error_L2(uh, u_ex, degree_raise=3):
    # Create higher order function space
    degree = uh.function_space.ufl_element().degree
    family = uh.function_space.ufl_element().family_name
    mesh = uh.function_space.mesh
    W = functionspace(mesh, (family, degree + degree_raise))
    # Interpolate approximate solution
    u_W = Function(W)
    u_W.interpolate(uh)
    # Interpolate exact solution, special handling if exact solution
    # is a ufl expression or a python lambda function
    u_ex_W = Function(W)
    if isinstance(u_ex, ufl.core.expr.Expr):
        u_expr = Expression(u_ex, W.element.interpolation_points())
        u_ex_W.interpolate(u_expr)
    else:
        u_ex_W.interpolate(u_ex)
    # Compute the error in the higher order function space
    e_W = Function(W)
    e_W.x.array[:] = u_W.x.array - u_ex_W.x.array
    # Integrate the error
    error = form(ufl.inner(e_W, e_W) * ufl.dx)
    error_local = assemble_scalar(error)
    error_global = mesh.comm.allreduce(error_local, op=MPI.SUM)
    return np.sqrt(error_global)
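# %% [markdown]
# As a quick sanity check (my addition, not part of the original tutorial):
# `error_L2` accepts either form of the exact solution. The numpy lambda is
# interpolated directly, while a UFL expression goes through
# `dolfinx.fem.Expression` (and hence JIT compilation). Both branches should
# agree:
# %%
uh_test, u_ex_test = solve_poisson(4, degree=1)
print(f"{error_L2(uh_test, u_numpy):.2e}")    # python-lambda branch
print(f"{error_L2(uh_test, u_ex_test):.2e}")  # ufl-expression branch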
# %% [markdown]
# ## Computing convergence rates
# Let us consider a sequence of mesh resolutions $h_0>h_1>h_2\dots$, where $h_i=\frac{1}{N_i}$, and compute the errors for a range of $N_i$'s.
# %%
Ns = [4, 8, 16, 32, 64, 128, 256]
Es = np.zeros(len(Ns), dtype=default_scalar_type)
hs = np.zeros(len(Ns), dtype=np.float64)
for i, N in enumerate(Ns):
    uh, u_ex = solve_poisson(N, degree=1)
    comm = uh.function_space.mesh.comm
    # One can send in either u_numpy or u_ex.
    # For L2 error estimations it is recommended to send in u_numpy,
    # as no JIT compilation is required
    eh = uh - u_ex
    error_H10 = form(dot(grad(eh), grad(eh)) * dx)
    E_H10 = np.sqrt(comm.allreduce(assemble_scalar(error_H10), op=MPI.SUM))
    # if comm.rank == 0:
    #     print(f"H01-error: {E_H10:.2e}")
    Es[i] = error_L2(uh, u_numpy)
    hs[i] = 1. / Ns[i]
    if comm.rank == 0:
        print(f"h: {hs[i]:.2e} L2-Error: {Es[i]:.2e} H01-error: {E_H10:.2e}")
# %% [markdown]
# If we assume that $E_i$ is of the form $E_i=Ch_i^r$, with unknown constants $C$ and $r$, we can compare two consecutive experiments, $E_{i-1}= Ch_{i-1}^r$ and $E_i=Ch_i^r$, and solve for $r$:
# ```{math}
# r=\frac{\ln(E_i/E_{i-1})}{\ln(h_i/h_{i-1})}
# ```
# The $r$ values should approach the expected convergence rate (typically the polynomial degree plus 1 for the $L^2$-error) as $i$ increases. This can be written compactly using `numpy`.
# %%
rates = np.log(Es[1:] / Es[:-1]) / np.log(hs[1:] / hs[:-1])
if comm.rank == 0:
    print(f"Rates: {rates}")
# %% [markdown]
# We also do a similar study for different orders of polynomial spaces to verify our previous claim.
# %%
degrees = [1, 2, 3, 4]
for degree in degrees:
    Es = np.zeros(len(Ns), dtype=default_scalar_type)
    hs = np.zeros(len(Ns), dtype=np.float64)
    for i, N in enumerate(Ns):
        uh, u_ex = solve_poisson(N, degree=degree)
        comm = uh.function_space.mesh.comm
        Es[i] = error_L2(uh, u_numpy, degree_raise=1)
        hs[i] = 1. / Ns[i]
        if comm.rank == 0:
            print(f"h: {hs[i]:.2e} Error: {Es[i]:.2e}")
    rates = np.log(Es[1:] / Es[:-1]) / np.log(hs[1:] / hs[:-1])
    if comm.rank == 0:
        print(f"Polynomial degree {degree:d}, Rates {rates}")
# %% [markdown]
# ### Infinity norm estimates
# We start by creating a function to compute the infinity norm, the max difference between the approximate and exact solution.
# %%
def error_infinity(u_h, u_ex):
    # Interpolate exact solution, special handling if exact solution
    # is a ufl expression or a python lambda function
    comm = u_h.function_space.mesh.comm
    u_ex_V = Function(u_h.function_space)
    if isinstance(u_ex, ufl.core.expr.Expr):
        u_expr = Expression(u_ex, u_h.function_space.element.interpolation_points())
        u_ex_V.interpolate(u_expr)
    else:
        u_ex_V.interpolate(u_ex)
    # Compute infinity norm, first local to process, then gather the max
    # value over all processes
    error_max_local = np.max(np.abs(u_h.x.array - u_ex_V.x.array))
    error_max = comm.allreduce(error_max_local, op=MPI.MAX)
    return error_max
# %% [markdown]
# Running this for various polynomial degrees yields:
# %%
for degree in degrees:
    Es = np.zeros(len(Ns), dtype=default_scalar_type)
    hs = np.zeros(len(Ns), dtype=np.float64)
    for i, N in enumerate(Ns):
        uh, u_ex = solve_poisson(N, degree=degree)
        comm = uh.function_space.mesh.comm
        Es[i] = error_infinity(uh, u_numpy)
        hs[i] = 1. / Ns[i]
        if comm.rank == 0:
            print(f"h: {hs[i]:.2e} Error: {Es[i]:.2e}")
    rates = np.log(Es[1:] / Es[:-1]) / np.log(hs[1:] / hs[:-1])
    if comm.rank == 0:
        print(f"Polynomial degree {degree:d}, Rates {rates}")
# %% [markdown]
# We observe super convergence for second order polynomials, yielding a fourth order convergence.
Below is the output log:
L2-error: 8.11e-15
H01-error: 1.39e-14
h: 2.50e-01 L2-Error: 3.61e-16 H01-error: 7.57e-16
h: 1.25e-01 L2-Error: 2.54e-15 H01-error: 4.26e-15
h: 6.25e-02 L2-Error: 9.76e-15 H01-error: 1.57e-14
h: 3.12e-02 L2-Error: 3.94e-14 H01-error: 6.29e-14
h: 1.56e-02 L2-Error: 1.82e-13 H01-error: 2.87e-13
h: 7.81e-03 L2-Error: 7.54e-13 H01-error: 1.19e-12
h: 3.91e-03 L2-Error: 3.05e-12 H01-error: 4.80e-12
Rates: [-2.81532724 -1.94061354 -2.01154925 -2.2075718 -2.05273341 -2.01701287]