Alternative formulation for time-dependent Dirichlet BCs

Hi all,

I am having an issue implementing time-dependent boundary conditions in a way that does not require redefining the NonlinearProblem at each time step. Currently, as seen in the first MWE below, I am effectively creating a new problem at every step, with boundary conditions given by the functions input1 and input2 (each applied at a single point).

However, 1) this feels wildly inefficient, and 2) it means that I cannot, for example, tell the Krylov solver to reuse its preconditioner across steps (since a new solver is defined at every iteration). From following other threads, I think this can be remedied by wrapping the boundary value in a class and re-interpolating it at each step to update the BCs.

My attempt at the approach described above is in the second MWE, but the output is just zeros. Any help would be greatly appreciated!

#!/usr/bin/env python3

import numpy as np
import matplotlib.pyplot as plt
from mpi4py import MPI
from petsc4py import PETSc
from dolfinx import fem, mesh, default_scalar_type, log
from dolfinx.io import XDMFFile
import ufl
import dolfinx.fem.petsc
import dolfinx.nls.petsc
import time
start_time = time.time()

## Create mesh
L = 1.0  # Length of the rod
nx, ny, nz = 10, 10, 100  # Number of elements in x, y, and z directions
domain = mesh.create_box(MPI.COMM_WORLD, [np.array([0, 0, 0]), np.array(
    [0.1, 0.1, L])], [nx, ny, nz], cell_type=mesh.CellType.hexahedron)

## Create function spaces
V = fem.VectorFunctionSpace(domain, ("CG", 1))
fdim = domain.topology.dim - 1

# Physical constants
E = 200e9
nu = 0.27
mu = E / (2.0 * (1 + nu))
lmbda = E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu))
rho = 7990

# Time integration parameters
am = 0.2
af = 0.4
g = 0.5 + af - am
alpha_m = fem.Constant(domain, am)
alpha_f = fem.Constant(domain, af)
gamma = fem.Constant(domain, g)
beta = fem.Constant(domain, (g + 0.5)**2 / 4.0)

T = 5.0e-4
Timp = 5.0e-4
Nsteps = 100
dt = fem.Constant(domain, T / Nsteps)

## Time dep. boundary conditions
Amp=1e-8 #amplitude of perturbation

def point1(x):
    return np.logical_and(np.logical_and(np.isclose(x[2], 0.0), np.isclose(x[1], 0)), np.isclose(x[0], 0.05))
def point2(x):
    return np.logical_and(np.logical_and(np.isclose(x[2], 0.0), np.isclose(x[1], 0.1)), np.isclose(x[0], 0.05))

def input1(t):
    return np.array([Amp*t, 0.0,0.0], dtype=default_scalar_type)
def input2(t):
    return np.array([-1*Amp*t, 0.0,0.0], dtype=default_scalar_type)

ti=0
bc_fun1=input1(ti)
bc_fun2=input2(ti)

point1_dofs = fem.locate_dofs_geometrical(V, point1) 
bc1 = fem.dirichletbc(bc_fun1, point1_dofs, V)

point2_dofs = fem.locate_dofs_geometrical(V, point2) 
bc2 = fem.dirichletbc(bc_fun2, point2_dofs, V)

# Functions and fields
u_ = ufl.TestFunction(V)
u = fem.Function(V, name='Displacement')
u_old = fem.Function(V)
v_old = fem.Function(V)
a_old = fem.Function(V)

# Measures
dx = ufl.Measure("dx", domain=domain)

## GA method functions
def sigma(r):
    return lmbda*ufl.nabla_div(r)*ufl.Identity(len(r)) + 2*mu*ufl.sym(ufl.grad(r))
def m(u, u_):
    return rho*ufl.inner(u, u_)*dx
def k(u, u_):
    return ufl.inner(sigma(u), ufl.grad(u_))*dx

## Update variables
def update_a(u, u_old, v_old, a_old, ufl_=True):
    if ufl_:
        dt_ = dt
        beta_ = beta
        return (u-u_old-dt_*v_old)/beta_/dt_**2 - (1-2*beta_)/2/beta_*a_old

    else:
        dt_ = float(dt)
        beta_ = float(beta)
        return (u-u_old-dt_*v_old)/beta_/dt_**2 - (1-2*beta_)/2/beta_*a_old

def update_v(a, u_old, v_old, a_old, ufl_=True):
    if ufl_:
        return v_old + dt*((1-gamma)*a_old + gamma*a)
    else:
        return v_old + float(dt) * ((1 - float(gamma)) * a_old + float(gamma)*a)

def update_fields(u, u_old, v_old, a_old):
    u_vec, u0_vec = u.x.array[:], u_old.x.array[:]
    v0_vec, a0_vec = v_old.x.array[:], a_old.x.array[:]

    a_vec = update_a(u_vec, u0_vec, v0_vec, a0_vec, ufl_=False)
    v_vec = update_v(a_vec, u0_vec, v0_vec, a0_vec, ufl_=False)

    v_old.x.array[:] = v_vec     #numpy.ndarray stored inside dolfinx.fem.function.Function
    a_old.x.array[:] = a_vec
    u_old.x.array[:] = u_vec

def avg(x_old, x_new, alpha):
    return alpha*x_old + (1-alpha)*x_new

a_new = update_a(u, u_old, v_old, a_old, ufl_=True)     #ufl.algebra.Sum
v_new = update_v(a_new, u_old, v_old, a_old, ufl_=True)

## Residual to solve
res = m(avg(a_old, a_new, alpha_m), u_) + k(avg(u_old, u, alpha_f), u_)

## Create file
xdmf_file = XDMFFile(domain.comm, "output_xdmfs/rod_straight.xdmf", "w")
xdmf_file.write_mesh(domain)
xdmf_file.write_function(u, 0)

# Write function at each time step
tt=0
for i in range(Nsteps):
    tt += float(dt)

    bc_fun1=input1(tt)
    bc_fun2=input2(tt)

    bc1 = fem.dirichletbc(bc_fun1, point1_dofs,V)
    bc2 = fem.dirichletbc(bc_fun2, point2_dofs,V)

    allbcs=[bc1,bc2] #set BCs

    problem = fem.petsc.NonlinearProblem(res, u, bcs=allbcs) #problem setup

    ## Solver setup
    solver = dolfinx.nls.petsc.NewtonSolver(MPI.COMM_WORLD, problem)

    solver.atol = 1e-8
    solver.rtol = 1e-8
    solver.convergence_criterion = "incremental"

    ksp = solver.krylov_solver
    opts = PETSc.Options()
    option_prefix = ksp.getOptionsPrefix()
    opts[f"{option_prefix}ksp_type"] = "cg"
    opts[f"{option_prefix}pc_type"] = "gamg"

    ksp.setFromOptions()

    num_its, converged = solver.solve(u)

    assert converged
    
    u.x.scatter_forward()

    # Update old fields with new quantities
    update_fields(u, u_old, v_old, a_old)

    # Write the displacement to the XDMF file
    xdmf_file.write_function(u, tt)

xdmf_file.close()

if MPI.COMM_WORLD.rank==0:
    print("--- %s seconds runtime ---" % (time.time() - start_time))
## rest of code ##

## Time dep. boundary conditions
Amp=1e-8 #amplitude of perturbation

def point1(x):
    return np.logical_and(np.logical_and(np.isclose(x[2], 0.0), np.isclose(x[1], 0)), np.isclose(x[0], 0.05))
def point2(x):
    return np.logical_and(np.logical_and(np.isclose(x[2], 0.0), np.isclose(x[1], 0.1)), np.isclose(x[0], 0.05))

class input1class:
    def __init__(self):
        self.t = 0.0

    def eval(self, x):
        # Shape (3, num_points): x-component ramps with t, y and z stay zero
        return np.stack((np.full(x.shape[1], Amp*self.t),
                         np.zeros(x.shape[1]),
                         np.zeros(x.shape[1])))

class input2class:
    def __init__(self):
        self.t = 0.0

    def eval(self, x):
        return np.stack((np.full(x.shape[1], -1*Amp*self.t),
                         np.zeros(x.shape[1]),
                         np.zeros(x.shape[1])))

input1_expr=input1class()
input1_expr.t=0.
bc_fun1=fem.Function(V)
bc_fun1.interpolate(input1_expr.eval)

input2_expr=input2class()
input2_expr.t=0.
bc_fun2=fem.Function(V)
bc_fun2.interpolate(input2_expr.eval)

point1_dofs = fem.locate_dofs_geometrical(V, point1) 
point2_dofs = fem.locate_dofs_geometrical(V, point2) 
 
bc1 = fem.dirichletbc(bc_fun1, point1_dofs)
bc2 = fem.dirichletbc(bc_fun2, point2_dofs)

allbcs=[bc1,bc2] #set BCs

## rest of code ##

for i in range(Nsteps):
    tt += float(dt)

    input1_expr.t=tt
    bc_fun1.interpolate(input1_expr.eval)

    input2_expr.t=tt
    bc_fun2.interpolate(input2_expr.eval)

    bc1 = fem.dirichletbc(bc_fun1, point1_dofs)
    bc2 = fem.dirichletbc(bc_fun2, point2_dofs)

    allbcs=[bc1,bc2]

    fem.set_bc(u.vector, allbcs)

    ## rest of code ##

I would recommend reading: Form compilation — FEniCS Tutorial @ Sorbonne
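In short, the pattern described there boils down to the following sketch (reusing domain, V, res, u, point1_dofs and input1 from the MWEs above, and only one of the two point BCs for brevity): the boundary value lives in a fem.Constant that is updated in place, so the problem and solver are created exactly once.

bc_val1 = fem.Constant(domain, PETSc.ScalarType((0.0, 0.0, 0.0)))
bc1 = fem.dirichletbc(bc_val1, point1_dofs, V)

problem = fem.petsc.NonlinearProblem(res, u, bcs=[bc1])           # created once
solver = dolfinx.nls.petsc.NewtonSolver(MPI.COMM_WORLD, problem)  # created once

t = 0.0
for i in range(Nsteps):
    t += float(dt)
    bc_val1.value = input1(t)  # bc1 references bc_val1, so this is all that is needed
    num_its, converged = solver.solve(u)
    u.x.scatter_forward()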

Thanks @dokken - I have got it working.

One further question: if, after the loading time of the time-dependent BC, I want to allow the system to evolve without boundary conditions, do I need to set up a new problem, or can I somehow update the solver's bcs to “bcs=[ ]”?

You would have to update problem.bcs by setting problem.bcs = [], as there are sub-functions of problem that are called in the Newton solver.

Hi dokken, implementing this has a strange effect: the iteration where the change to no BCs occurs requires much more computational effort (see below):

Time: 4.60000e-04, NLS its: 3, KSP its: 15, Solve time: 14.44191
Time: 4.70000e-04, NLS its: 3, KSP its: 15, Solve time: 13.50215
Time: 4.80000e-04, NLS its: 3, KSP its: 15, Solve time: 12.43476
Time: 4.90000e-04, NLS its: 3, KSP its: 15, Solve time: 12.61212
Time: 5.00000e-04, NLS its: 40, KSP its: 30, Solve time: 357.87519
Time: 5.10000e-04, NLS its: 3, KSP its: 15, Solve time: 15.50777
Time: 5.20000e-04, NLS its: 3, KSP its: 15, Solve time: 14.34820
Time: 5.30000e-04, NLS its: 3, KSP its: 16, Solve time: 15.23458

This was achieved by using an if statement that is tripped once in the loop:

if tt.value > Timp and Timpflag:
    problem.bcs = []
    Timpflag = False

A nonlinear problem is very dependent on the initial conditions and boundary conditions, so I would expect a change in the number of iterations. It is up to you to look at the solution and see whether it changes drastically, how the residual decreases with the number of iterations, etc.

Hi @dokken, thanks for the assistance with this. I just wanted to check that I am correctly telling the solver not to rebuild the preconditioner at every step:

After the solver is defined

ksp = solver.krylov_solver
opts = PETSc.Options()
option_prefix = ksp.getOptionsPrefix()

#Options

ksp.setFromOptions()
ksp.getPC().setReusePreconditioner(True)

I have this statement in the loop:

if ksp.its > 15:
    ksp.getPC().reset()

However, the number of ksp.its doesn’t seem to go down after it hits 16, which suggests the preconditioner is not being rebuilt?

Thanks!

Please provide a minimal reproducible example, for instance solving a Poisson/heat equation with similar kind of boundary conditions as described above.
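Something along these lines would do (a minimal heat-equation sketch with the same kind of in-place BC update as above; all names and parameter values here are illustrative, and the dolfinx API is assumed to match the version used in the MWEs):

import numpy as np
import ufl
from mpi4py import MPI
from petsc4py import PETSc
from dolfinx import fem, mesh
import dolfinx.fem.petsc

domain = mesh.create_unit_square(MPI.COMM_WORLD, 32, 32)
V = fem.FunctionSpace(domain, ("CG", 1))

# Time-dependent Dirichlet value stored in a Constant and updated in place
u_bc = fem.Constant(domain, PETSc.ScalarType(0.0))
dofs = fem.locate_dofs_geometrical(V, lambda x: np.isclose(x[0], 0.0))
bc = fem.dirichletbc(u_bc, dofs, V)

u_n = fem.Function(V)  # solution at the previous time step
dt = 1.0e-2
u, v = ufl.TrialFunction(V), ufl.TestFunction(V)
a = u*v*ufl.dx + dt*ufl.dot(ufl.grad(u), ufl.grad(v))*ufl.dx  # implicit Euler
L = u_n*v*ufl.dx
problem = fem.petsc.LinearProblem(a, L, bcs=[bc],
                                  petsc_options={"ksp_type": "cg", "pc_type": "gamg"})

t = 0.0
for n in range(100):
    t += dt
    u_bc.value = np.sin(2*np.pi*t)  # update the BC without rebuilding anything
    uh = problem.solve()
    u_n.x.array[:] = uh.x.array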

I think I resolved my own query. I assumed that rebuilding the preconditioner at a step, as opposed to reusing the old one, would reduce the number of linear iterations, but I guess this doesn’t necessarily have to be the case.
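For anyone wanting to check the same thing, one quick empirical test is to toggle reuse on the same problem and compare iteration counts (a sketch against the solver objects from the MWEs above; note that each call to solve advances the state):

pc = solver.krylov_solver.getPC()
for reuse in (True, False):
    pc.setReusePreconditioner(reuse)
    num_its, converged = solver.solve(u)  # caution: each solve updates u
    print(f"reuse={reuse}: NLS its {num_its}, KSP its {solver.krylov_solver.its}")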

Hi @dokken,

I just had a thought: solving this problem takes more than one nonlinear solver iteration. Shouldn’t it take only one, since the residual is bilinear in the test and trial functions, effectively the same as in the elastodynamics demo?

I don’t really know what problem you are trying to solve, as you haven’t

  1. Written out the strong formulation of your problem
  2. Derived the corresponding weak form.

Without these, I do not have time to look into the details of your code and backward-engineer 1 and 2.

My problem is exactly the same as in the demo (implemented in dolfinx as opposed to dolfin), but it has no external forces and is ‘forced’ by the time-dependent Dirichlet BCs.

The only difference I can think of is the mesh I am considering: it is a cylindrical pipe rather than a cuboid, but this shouldn’t affect linearity?

Thanks!

It should not.
Please add a reproducible example with the print statements from your earlier post, as this has not been presented in this thread.

Consider the following MWE with a cuboid:

#!/usr/bin/env python3

import numpy as np
import matplotlib.pyplot as plt
from mpi4py import MPI
from petsc4py import PETSc
from dolfinx import fem, mesh, default_scalar_type, log
from dolfinx.io import XDMFFile
import ufl
import dolfinx.fem.petsc
import dolfinx.nls.petsc
import time
start_time = time.time()

## Create mesh
L = 1.0  # Length of the rod
nx, ny, nz = 6, 6, 20  # Number of elements in x, y, and z directions
domain = mesh.create_box(MPI.COMM_WORLD, [np.array([0, 0, 0]), np.array(
    [0.1, 0.1, L])], [nx, ny, nz], cell_type=mesh.CellType.hexahedron)

## Create function spaces
V = fem.VectorFunctionSpace(domain, ("CG", 1))
V_von_mises = fem.FunctionSpace(domain, ("DG",0))
fdim = domain.topology.dim - 1

# Physical constants
E = 200e9
nu = 0.27
mu = E / (2.0 * (1 + nu))
lmbda = E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu))
rho = 7990

# Time integration parameters
am = 0.2
af = 0.4
g = 0.5 + af - am
alpha_m = fem.Constant(domain, am)
alpha_f = fem.Constant(domain, af)
gamma = fem.Constant(domain, g)
beta = fem.Constant(domain, (g + 0.5)**2 / 4.0)

T = 5.0e-4
Timp = 5.0e-4
Nsteps = 200
dt = fem.Constant(domain, T / Nsteps)

## Time dep. boundary conditions
Amp=1e-5 #amplitude of perturbation

def point1(x):
    return np.logical_and(np.logical_and(np.isclose(x[2], 0.0), np.isclose(x[1], 0)), np.isclose(x[0], 0.05))
def point2(x):
    return np.logical_and(np.logical_and(np.isclose(x[2], 0.0), np.isclose(x[1], 0.1)), np.isclose(x[0], 0.05))
def input1(t):
    return np.array([Amp*np.sin(2*np.pi*t/Timp), 0.0,0.0], dtype=default_scalar_type)
def input2(t):
    return np.array([-1*Amp*np.sin(2*np.pi*t/Timp), 0.0,0.0], dtype=default_scalar_type)

bc_fun1=fem.Constant(domain, PETSc.ScalarType((0,0,0)))
bc_fun2=fem.Constant(domain, PETSc.ScalarType((0,0,0)))

point1_dofs = fem.locate_dofs_geometrical(V, point1) 
bc1 = fem.dirichletbc(bc_fun1, point1_dofs, V)

point2_dofs = fem.locate_dofs_geometrical(V, point2) 
bc2 = fem.dirichletbc(bc_fun2, point2_dofs, V)

allbcs=[bc1,bc2]

# Functions and fields
u_ = ufl.TestFunction(V)
u = fem.Function(V, name='Displacement')
u_old = fem.Function(V)
v_old = fem.Function(V)
a_old = fem.Function(V)

vm = fem.Function(V_von_mises, name='Von Mises Stress')

# Measures
dx = ufl.Measure("dx", domain=domain)

## GA method functions
def sigma(r):
    return lmbda*ufl.nabla_div(r)*ufl.Identity(len(r)) + 2*mu*ufl.sym(ufl.grad(r))
def m(u, u_):
    return rho*ufl.inner(u, u_)*dx
def k(u, u_):
    return ufl.inner(sigma(u), ufl.grad(u_))*dx

## Update variables
def update_a(u, u_old, v_old, a_old, ufl_=True):
    if ufl_:
        dt_ = dt
        beta_ = beta
        return (u-u_old-dt_*v_old)/beta_/dt_**2 - (1-2*beta_)/2/beta_*a_old

    else:
        dt_ = float(dt)
        beta_ = float(beta)
        return (u-u_old-dt_*v_old)/beta_/dt_**2 - (1-2*beta_)/2/beta_*a_old

def update_v(a, u_old, v_old, a_old, ufl_=True):
    if ufl_:
        return v_old + dt*((1-gamma)*a_old + gamma*a)
    else:
        return v_old + float(dt) * ((1 - float(gamma)) * a_old + float(gamma)*a)

def update_fields(u, u_old, v_old, a_old):
    u_vec, u0_vec = u.x.array[:], u_old.x.array[:]
    v0_vec, a0_vec = v_old.x.array[:], a_old.x.array[:]

    a_vec = update_a(u_vec, u0_vec, v0_vec, a0_vec, ufl_=False)
    v_vec = update_v(a_vec, u0_vec, v0_vec, a0_vec, ufl_=False)

    v_old.x.array[:] = v_vec     #numpy.ndarray stored inside dolfinx.fem.function.Function
    a_old.x.array[:] = a_vec
    u_old.x.array[:] = u_vec

def avg(x_old, x_new, alpha):
    return alpha*x_old + (1-alpha)*x_new

a_new = update_a(u, u_old, v_old, a_old, ufl_=True)     #ufl.algebra.Sum
v_new = update_v(a_new, u_old, v_old, a_old, ufl_=True)

## Residual to solve
res = m(avg(a_old, a_new, alpha_m), u_) + k(avg(u_old, u, alpha_f), u_)

problem = fem.petsc.NonlinearProblem(res, u, bcs=allbcs) #problem setup

## Solver setup
solver = dolfinx.nls.petsc.NewtonSolver(MPI.COMM_WORLD, problem)

solver.atol = 1e-13
solver.rtol = 1e-8
solver.convergence_criterion = "incremental"

ksp = solver.krylov_solver
opts = PETSc.Options()
option_prefix = ksp.getOptionsPrefix()
opts[f"{option_prefix}ksp_type"] = "gmres"
opts[f"{option_prefix}pc_type"] = "gamg"
opts[f"{option_prefix}pc_hypre_type"] = "boomeramg"
ksp.setFromOptions()
ksp.getPC().setReusePreconditioner(True)

# s = sigma(u) - 1. / 3 * ufl.tr(sigma(u)) * ufl.Identity(len(u))       #Von Mises Stresses
# von_Mises = ufl.sqrt(3. / 2 * ufl.inner(s, s))

# vm_expr = fem.Expression(von_Mises, V_von_mises.element.interpolation_points())
# vm.interpolate(vm_expr)

## Create file
xdmf_file = XDMFFile(domain.comm, "output_xdmfs/test.xdmf", "w")
xdmf_file.write_mesh(domain)
xdmf_file.write_function(u, 0)
# xdmf_file.write_function(vm, 0)

# Write function at each time step
tt=fem.Constant(domain, 0.)
cum_time = 0  
for i in range(Nsteps):
    tt.value += dt.value

    bc_fun1.value=input1(tt.value)
    bc_fun2.value=input2(tt.value)

    if MPI.COMM_WORLD.rank==0:
        s = time.perf_counter()
    
    num_its, converged = solver.solve(u)

    if MPI.COMM_WORLD.rank==0:
        e = time.perf_counter()
        cum_time += e-s
        print(f"Time: {tt.value:.5e}, NLS its: {num_its:d}, KSP its: {ksp.its:d}, Solve time: {e-s:.5f}, Cumulative solve time: {cum_time:.5f}")

    assert converged
    
    u.x.scatter_forward()

    # Update old fields with new quantities
    update_fields(u, u_old, v_old, a_old)

    # Write the displacement to the XDMF file
    xdmf_file.write_function(u, tt.value)
    
    # vm.interpolate(vm_expr)
    # xdmf_file.write_function(vm, tt.value)

xdmf_file.close()

if MPI.COMM_WORLD.rank==0:
    print("--- %s seconds runtime ---" % (time.time() - start_time))

Running this on my PC with 10 cores gives the following snippet:

Time: 4.90000e-04, NLS its: 3, KSP its: 5, Solve time: 0.02712, Cumulative solve time: 5.82327
Time: 4.92500e-04, NLS its: 3, KSP its: 5, Solve time: 0.02702, Cumulative solve time: 5.85029
Time: 4.95000e-04, NLS its: 3, KSP its: 5, Solve time: 0.02828, Cumulative solve time: 5.87857
Time: 4.97500e-04, NLS its: 3, KSP its: 5, Solve time: 0.03004, Cumulative solve time: 5.90861
Time: 5.00000e-04, NLS its: 3, KSP its: 5, Solve time: 0.02536, Cumulative solve time: 5.93397
--- 9.547337532043457 seconds runtime ---

Just adding:

    log.set_log_level(log.LogLevel.INFO)

to your code, and only considering the first iteration, you get:

2024-01-02 16:06:40.534 (   0.081s) [main            ]              petsc.cpp:698   INFO| PETSc Krylov solver starting to solve system.
2024-01-02 16:06:40.566 (   0.113s) [main            ]              petsc.cpp:698   INFO| PETSc Krylov solver starting to solve system.
2024-01-02 16:06:40.573 (   0.119s) [main            ]       NewtonSolver.cpp:36    INFO| Newton iteration 2: r (abs) = 8.66056e-13 (tol = 1e-13) r (rel) = 1.84551e-06(tol = 1e-08)
2024-01-02 16:06:40.587 (   0.134s) [main            ]              petsc.cpp:698   INFO| PETSc Krylov solver starting to solve system.
2024-01-02 16:06:40.594 (   0.141s) [main            ]       NewtonSolver.cpp:36    INFO| Newton iteration 3: r (abs) = 4.34119e-18 (tol = 1e-13) r (rel) = 9.25081e-12(tol = 1e-08)
2024-01-02 16:06:40.594 (   0.141s) [main            ]       NewtonSolver.cpp:255   INFO| Newton solver finished in 3 iterations and 15 linear solver iterations.
Time: 2.50000e-06, NLS its: 3, KSP its: 5, Solve time: 0.07594, Cumulative solve time: 0.07594

which means that your problem is incredibly close to the actual solution after 1 iteration.
The fact that you are not seeing machine precision here is likely due to the fact that E is fairly large (E = 200e9).

If you change the tolerances to:

solver.atol = 1e-9
solver.rtol = 1e-9

You will observe convergence in 2 iterations, which is expected when the convergence criterion is incremental.
If you change it to residual, and choose to use an exact (direct) solver for the linearized problem (not an iterative solver), i.e.

solver.atol = 1e-9
solver.rtol = 1e-9
solver.convergence_criterion = "residual"

ksp = solver.krylov_solver
opts = PETSc.Options()
option_prefix = ksp.getOptionsPrefix()
opts[f"{option_prefix}ksp_type"] = "preonly"
opts[f"{option_prefix}pc_type"] = "lu"
opts[f"{option_prefix}pc_mat_factor_solver_type"] = "mumps"
ksp.setFromOptions()
ksp.getPC().setReusePreconditioner(True)

you will get convergence in 1 iteration.

If you use iterative solvers that only solve the problem up to a given tolerance, the iteration count will increase.
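As a rough illustration of that trade-off (an untested assumption for this particular setup, not something from the thread): tightening the Krylov relative tolerance makes each linear solve closer to exact, which should reduce the Newton count at the cost of more KSP iterations per solve:

opts[f"{option_prefix}ksp_rtol"] = 1e-14  # hypothetical: near-exact linear solves
ksp.setFromOptions()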

Thanks a lot! Is there likely to be an issue with machine precision if the initial displacement amplitude is made significantly smaller?

There might be; you always have to carefully scale your problems. There are several good books on this in the literature, including
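For illustration only, here is one common nondimensionalization for elastodynamics (a sketch, not taken from the thread; c0, L0, t0, U0 are illustrative names). It brings the stiffness and mass coefficients to O(1), so Newton increments stay well above machine epsilon:

# Characteristic scales built from the MWE's material and geometry
c0 = np.sqrt(E / rho)  # elastic wave-speed scale, roughly 5 km/s for these values
L0 = L                 # characteristic length: the rod length
t0 = L0 / c0           # characteristic time: one wave transit
U0 = Amp               # characteristic displacement: the BC amplitude
# Working in x/L0, t/t0, u/U0, the scaled Young's modulus is O(1), so absolute
# tolerances like 1e-9 are meaningful for the scaled unknowns.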
