Here’s an example. It moves the mesh-independent definitions and code generation outside of the loop over meshes. The two loops below do the same thing, but you’ll see that the first is much faster (about 50x faster on my computer).
It’s a bit messy from Python because we don’t expose a number of C++ convenience functions in Python, since the DOLFINx Python code takes care of some of the data marshalling before going into the underlying C++ layer. It could be made simpler.
import time
import basix
import dolfinx.fem.petsc
import ufl
from dolfinx import cpp as _cpp
from dolfinx import fem, jit, mesh
from mpi4py import MPI
from ufl import dx, grad, inner
# Build two identical rectangular meshes of the domain [0, 2] x [0, 1],
# each with a 32 x 16 grid of triangular cells.
corners = ((0.0, 0.0), (2.0, 1.0))
msh0 = mesh.create_rectangle(
    comm=MPI.COMM_WORLD, points=corners, n=(32, 16),
    cell_type=mesh.CellType.triangle)
msh1 = mesh.create_rectangle(
    comm=MPI.COMM_WORLD, points=corners, n=(32, 16),
    cell_type=mesh.CellType.triangle)
# Mesh-independent setup: define the abstract (symbolic) UFL problem and
# JIT-compile it ONCE, so the loop over meshes below does no code generation.
e = basix.ufl.element("Lagrange", "triangle", 1)
coord_element = basix.ufl.element("Lagrange", "triangle", 1, rank=1)
mesh_ufl = ufl.Mesh(coord_element)
V_ufl = ufl.FunctionSpace(mesh_ufl, e)

# Bilinear form for the stiffness matrix, a(u, v) = ∫ grad(u)·grad(v) dx
trial = ufl.TrialFunction(V_ufl)
test = ufl.TestFunction(V_ufl)
a = inner(grad(trial), grad(test)) * dx

# JIT-compile the UFL form; keep a raw pointer (as an integer) to the
# compiled ufcx_form so it can be handed to the C++ layer directly.
ufcx_form, module_form, code = jit.ffcx_jit(MPI.COMM_WORLD, a)
ffi_f = module_form.ffi
form_ptr = ffi_f.cast("uintptr_t", ffi_f.addressof(ufcx_form))

# JIT-compile the element and dofmap data
(ufcx_element, ufcx_dofmap), module_element, code = jit.ffcx_jit(MPI.COMM_WORLD, e)
ffi_e = module_element.ffi

# Wrap the compiled element in a C++ FiniteElement, and keep a pointer
# (integer) to the compiled dofmap data for per-mesh dofmap creation.
element_cpp = _cpp.fem.FiniteElement_float64(
    ffi_e.cast("uintptr_t", ffi_e.addressof(ufcx_element)))
dofmap_ptr = ffi_e.cast("uintptr_t", ffi_e.addressof(ufcx_dofmap))
# Fast version: re-use the pre-compiled form/element/dofmap for every mesh.
# NOTE: the loop body below was un-indented in the original paste (a
# SyntaxError); the indentation is restored here.
t0 = time.time()
meshes = 10 * [msh0._cpp_object, msh1._cpp_object]
for msh in meshes:
    # Create C++ dofmap for current mesh from the pre-compiled dofmap data
    cpp_dofmap = _cpp.fem.create_dofmap(
        MPI.COMM_WORLD, dofmap_ptr, msh.topology, element_cpp)
    # Create C++ function space for current mesh (holder for a mesh,
    # element and dofmap)
    Vcpp = _cpp.fem.FunctionSpace_float64(msh, element_cpp, cpp_dofmap)
    # Create C++ bilinear form from the pre-compiled form pointer
    a_cpp = _cpp.fem.Form_float64(
        form_ptr, spaces=[Vcpp, Vcpp], coefficients=[], constants=[],
        subdomains={}, mesh=None)
    # Create PETSc sparse matrix with the form's sparsity pattern
    A = _cpp.fem.petsc.create_matrix(a_cpp)
    # Pack coefficients (empty for this form) and assemble the matrix
    coeffs = _cpp.fem.pack_coefficients(a_cpp)
    _cpp.fem.petsc.assemble_matrix(A=A, a=a_cpp, constants=[], coeffs=coeffs, bcs=[])
    A.assemble()
    # print("Norm (new):", A.norm())
t1 = time.time()
print("Time (A):", t1 - t0)
# Reference case: the standard high-level DOLFINx path, which re-runs
# form compilation (fem.form) inside the loop for every mesh.
# NOTE: the loop body below was un-indented in the original paste (a
# SyntaxError); the indentation is restored here.
t0 = time.time()
P1 = basix.ufl.element("Lagrange", msh0.basix_cell(), 1)
meshes = 10 * [msh0, msh1]
for msh in meshes:
    V = fem.FunctionSpace(msh, P1)
    u, v = ufl.TrialFunction(V), ufl.TestFunction(V)
    a = fem.form(inner(grad(u), grad(v)) * dx)
    # Create and assemble PETSc sparse matrix
    A = dolfinx.fem.petsc.assemble_matrix(a)
    A.assemble()
    # print("Norm (ref):", A.norm())
t1 = time.time()
print("Time (B):", t1 - t0)