Hi,
I am trying to introduce a spatial transform in one subdomain. This makes the solver extremely slow in the compilation or assembly stage. I would appreciate it if someone knowledgeable could help me understand how to define the problem so as to avoid this.
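To see whether the time goes into the FFCx JIT compilation of the forms or into the actual assembly/solve, I believe the compilation step can be timed separately with dolfinx.fem.form. Just a sketch, to be dropped in right before the LinearProblem call in the MWE below (a and L are the forms defined there):
    t0 = time.time()
    a_form = dolfinx.fem.form(a)  # triggers the FFCx JIT compilation of the bilinear form
    L_form = dolfinx.fem.form(L)  # same for the linear form
    print("Form compilation took {} s".format(time.time() - t0))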
Below is an MWE executed in dolfinx 0.8 installed in WSL through conda:
import dolfinx, gmsh, ufl, time
from mpi4py import MPI
from dolfinx.io import gmshio
import numpy as np
from dolfinx.fem.petsc import LinearProblem
def magnetic_potential_model(mesh, ct, ft, space_transform=False):
    # gmsh domain tags, by visual inspection
    magnet_tag, shell_tag, vacuum_tag = 1, 2, 3
    Omega = dolfinx.fem.functionspace(mesh, ("CG", fem_degree))
    u, v = ufl.TrialFunction(Omega), ufl.TestFunction(Omega)
    dx = ufl.Measure('dx', domain=mesh, subdomain_data=ct)
    F = ufl.dot(ufl.grad(u), ufl.grad(v))*dx((magnet_tag, vacuum_tag))  # whole domain except the outer shell
    # add remanent flux
    F += -ufl.dot(ufl.as_vector((0, 1, 0)), ufl.grad(v))*dx(magnet_tag)  # only inside the magnet
    if space_transform:
        x, y, z = ufl.SpatialCoordinate(mesh)
        r = ufl.sqrt(x**2 + y**2)
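        # grad_x/grad_y give the x- and y-components of grad(u) pulled back through the
        # radial stretch r -> r_(r): the radial part is scaled by 1/jac (jac = dr_/dr),
        # the tangential part by r/r_ (chain rule for (x, y) -> (x*r_/r, y*r_/r)).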
        grad_x = lambda u, jac, r_: (1/jac*x**2/r**2 + y**2/(r*r_))*u.dx(0) + (1/jac*x*y/r**2 - x*y/(r*r_))*u.dx(1)
        grad_y = lambda u, jac, r_: (1/jac*x*y/r**2 - x*y/(r*r_))*u.dx(0) + (x**2/(r*r_) + 1/jac*y**2/r**2)*u.dx(1)
        r0_ie = fov_r - t_shell
        # IE: polynomial stretching: r_ = r0 + alpha*exi**beta
        beta = 3
        alpha = 1000*t_shell
        exi = (r - r0_ie)/t_shell
        r_ = r0_ie + (exi**beta)*alpha
        jac = alpha*beta/t_shell*exi**(beta-1)
        # gradients in the layer
        grad_x_u = grad_x(u, jac, r_)
        grad_y_u = grad_y(u, jac, r_)
        grad_x_test = grad_x(v, jac, r_)
        grad_y_test = grad_y(v, jac, r_)
        grad_ie_trial = ufl.as_vector((grad_x_u, grad_y_u))
        grad_ie_test = ufl.as_vector((grad_x_test, grad_y_test))
        # jac*(r_/r) is the volume factor (determinant) of the radial mapping
        F += ufl.dot(grad_ie_trial, grad_ie_test)*jac*(r_/r)*dx(shell_tag)
    else:
        F += ufl.dot(ufl.grad(u), ufl.grad(v))*dx(shell_tag)
    a = ufl.lhs(F)
    L = ufl.rhs(F)
    mesh.topology.create_connectivity(mesh.topology.dim-1, mesh.topology.dim)
    boundary_facets = dolfinx.mesh.exterior_facet_indices(mesh.topology)
    boundary_dofs = dolfinx.fem.locate_dofs_topological(Omega, mesh.topology.dim-1, boundary_facets)
    u_dbc = dolfinx.fem.Function(Omega)
    u_dbc.x.array[:] = 0.0
    dbc = [dolfinx.fem.dirichletbc(u_dbc, boundary_dofs)]
    ## solve
    petsc_options = {"ksp_type": "preonly", "pc_type": "lu", "pc_factor_mat_solver_type": "mumps"}
    linear_problem = LinearProblem(a, L, bcs=dbc, petsc_options=petsc_options)
    uh = linear_problem.solve()
    uh.x.scatter_forward()
    print("Solved for {} dofs. Min, max of the solved potential is {}, {}".format(uh.x.array.size, uh.x.array.min(), uh.x.array.max()))
    return uh
fov_r = 1
t_shell = 0.1
gdim = 3
fem_degree = 2
gmsh.initialize()
gmsh.option.setNumber("General.Terminal", 0)
gmsh.clear()
occ = gmsh.model.occ
mag = occ.add_box(-0.5, -0.25, -0.25, 1, 0.5, 0.5)
fov = occ.add_cylinder(0, 0, -0.5, 0, 0, 1, fov_r)
shell = occ.add_cylinder(0, 0, -0.5, 0, 0, 1, fov_r-t_shell)
frag, _ = occ.fragment([(gdim, fov)], [(gdim, mag), (gdim, shell)])
occ.synchronize()
all_doms = occ.get_entities(gdim)
for j, dom in enumerate(all_doms):
    gmsh.model.addPhysicalGroup(dom[0], [dom[1]], j + 1)  # one physical group per volume
all_edges = gmsh.model.getEntities(gdim - 1)
for j, edge in enumerate(all_edges):
    gmsh.model.addPhysicalGroup(edge[0], [edge[1]], edge[1])  # one physical group per surface, tagged by its entity tag
gmsh.model.mesh.generate(gdim)
gmsh.model.mesh.refine()
# gmsh.fltk.run()
mesh, ct, ft = gmshio.model_to_mesh(gmsh.model, MPI.COMM_WORLD, 0, gdim)
start_time = time.time()
uh = magnetic_potential_model(mesh, ct, ft, space_transform=False)
print("Solve time without spatial transform: {}".format(time.time()-start_time))
start_time = time.time()
uh_st = magnetic_potential_model(mesh, ct, ft, space_transform=True)
print("Solve time with spatial transform: {}".format(time.time()-start_time))
This leads to the following output on my device:
Solve time without spatial transform: 3.008222818374634s
Solve time with spatial transform: 124.13119649887085s
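For reference, in case the slowdown comes from the automatically estimated quadrature degree of the transformed terms (only a guess on my side), I assume one could cap it on the shell integral via the measure metadata, roughly like this (sketch, reusing the names from the MWE; the value 6 is arbitrary):
        # cap the quadrature degree only for the transformed shell integral
        dx_shell = dx(shell_tag, metadata={"quadrature_degree": 6})
        F += ufl.dot(grad_ie_trial, grad_ie_test)*jac*(r_/r)*dx_shell
Is that the right direction, or is there a better way to define the problem?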