Linear solver difference in demo-stokes

Hello FEniCS community,

I’m still quite new to FEniCS, and this post is mostly about conceptual questions. While adapting and reproducing the demo code (Stokes equations using Taylor-Hood elements — DOLFINx 0.10.0.0 documentation) on my own machine (DOLFINx 0.9.0 on macOS), I am wondering what causes the mixed direct solver to give such weird results. Taylor-Hood elements are supposed to be stable regardless of the discretization parameters, and I am not even sure the problem is one of numerical stability: the mixed direct solver does converge, but it converges to a wrong solution.

Does this mean the mixed direct solver should not be used for any Stokes problem? Is there a way, or a criterion, to verify that the result given by a mixed direct solver is reliable? Further, how can one know that the nested solver or the blocked iterative solver is reliable (assuming the numerical method is in principle well posed)? I understand there are complications because the variational problem is of saddle-point type, but could anyone help me relate that to the mixed solver, and to how it could cause the trouble demonstrated in the demo code?

A working example (though not minimal, my apologies) is below, though the question is mainly conceptual. Any help is greatly appreciated!

from mpi4py import MPI

try:
    from petsc4py import PETSc

    import dolfinx

    if not dolfinx.has_petsc:
        print("This demo requires DOLFINx to be compiled with PETSc enabled.")
        exit(0)
except ModuleNotFoundError:
    print("This demo requires petsc4py.")
    exit(0)

import numpy as np

import ufl
from basix.ufl import element, mixed_element
from dolfinx import default_real_type, fem, la
from dolfinx.fem import (
    Constant,
    Function,
    dirichletbc,
    extract_function_spaces,
    form,
    functionspace,
    locate_dofs_topological,
)
from dolfinx.fem.petsc import assemble_matrix_block, assemble_vector_block
from dolfinx.io import XDMFFile
from dolfinx.mesh import CellType, create_rectangle, locate_entities_boundary
from dolfinx.plot import vtk_mesh
from ufl import div, dx, grad, inner

import pyvista

opts = PETSc.Options()
opts["mat_superlu_dist_iterrefine"] = True

# +
# Create mesh
msh = create_rectangle(
    MPI.COMM_WORLD, [np.array([0, 0]), np.array([1, 1])], [32, 32], CellType.triangle
)

P2 = element("Lagrange", msh.basix_cell(), 2, shape=(msh.geometry.dim,), dtype=default_real_type)
P1 = element("Lagrange", msh.basix_cell(), 1, dtype=default_real_type)
V, Q = functionspace(msh, P2), functionspace(msh, P1)

# Boundary conditions for the velocity field are defined:

# Function to mark x = 0, x = 1 and y = 0
def noslip_boundary(x):
    return np.isclose(x[0], 0.0) | np.isclose(x[0], 1.0) | np.isclose(x[1], 0.0)


# Function to mark the lid (y = 1)
def lid(x):
    return np.isclose(x[1], 1.0)


# Lid velocity
def lid_velocity_expression(x):
    return np.stack((np.ones(x.shape[1]), np.zeros(x.shape[1])))

# +
# No-slip condition on boundaries where x = 0, x = 1, and y = 0
noslip = np.zeros(msh.geometry.dim, dtype=PETSc.ScalarType)  # type: ignore
facets = locate_entities_boundary(msh, 1, noslip_boundary)
bc0 = dirichletbc(noslip, locate_dofs_topological(V, 1, facets), V)

# Driving (lid) velocity condition on top boundary (y = 1)
lid_velocity = Function(V)
lid_velocity.interpolate(lid_velocity_expression)
facets = locate_entities_boundary(msh, 1, lid)
bc1 = dirichletbc(lid_velocity, locate_dofs_topological(V, 1, facets))

# Collect Dirichlet boundary conditions
bcs = [bc0, bc1]
# -

# The bilinear and linear forms for the Stokes equations are defined
# using a blocked structure:

# +
# Define variational problem
(u, p) = ufl.TrialFunction(V), ufl.TrialFunction(Q)
(v, q) = ufl.TestFunction(V), ufl.TestFunction(Q)
f = Constant(msh, (PETSc.ScalarType(0), PETSc.ScalarType(0)))  # type: ignore

a = form([[inner(grad(u), grad(v)) * dx, inner(p, div(v)) * dx], [inner(div(u), q) * dx, None]])
L = form([inner(f, v) * dx, inner(Constant(msh, PETSc.ScalarType(0)), q) * dx])  # type: ignore
# -

# A block-diagonal preconditioner will be used with the iterative
# solvers for this problem:

a_p11 = form(inner(p, q) * dx)
a_p = [[a[0][0], None], [None, a_p11]]

def view_flow(V, u, msh):

    # pyvista.start_xvfb()
    topology, cell_types, geometry = vtk_mesh(V)
    values = np.zeros((geometry.shape[0], 3), dtype=np.float64)
    values[:, :len(u)] = u.x.array.real.reshape((geometry.shape[0], len(u)))

    # Create a point cloud of glyphs
    function_grid = pyvista.UnstructuredGrid(topology, cell_types, geometry)
    function_grid["u"] = values
    glyphs = function_grid.glyph(orient="u", factor=0.5)

    # Create a pyvista-grid for the mesh
    msh.topology.create_connectivity(msh.topology.dim, msh.topology.dim)
    grid = pyvista.UnstructuredGrid(*vtk_mesh(msh, msh.topology.dim))

    # Create plotter
    plotter = pyvista.Plotter()
    plotter.add_mesh(grid, style="wireframe", color="k")
    plotter.add_mesh(glyphs)
    plotter.view_xy()
    if not pyvista.OFF_SCREEN:
        plotter.show()
    else:
        fig_as_array = plotter.screenshot("glyphs.png")

def nested_iterative_solver():
    """Solve the Stokes problem using nest matrices and an iterative solver."""

    # Assemble nested matrix operators
    A = fem.petsc.assemble_matrix_nest(a, bcs=bcs)
    A.assemble()

    # Create a nested matrix P to use as the preconditioner. The
    # top-left block of P is shared with the top-left block of A. The
    # bottom-right diagonal entry is assembled from the form a_p11:
    P11 = fem.petsc.assemble_matrix(a_p11, [])
    P = PETSc.Mat().createNest([[A.getNestSubMatrix(0, 0), None], [None, P11]])
    P.assemble()

    A00 = A.getNestSubMatrix(0, 0)
    A00.setOption(PETSc.Mat.Option.SPD, True)

    P00, P11 = P.getNestSubMatrix(0, 0), P.getNestSubMatrix(1, 1)
    P00.setOption(PETSc.Mat.Option.SPD, True)
    P11.setOption(PETSc.Mat.Option.SPD, True)

    # Assemble right-hand side vector
    b = fem.petsc.assemble_vector_nest(L)

    # Modify ('lift') the RHS for Dirichlet boundary conditions
    fem.petsc.apply_lifting_nest(b, a, bcs=bcs)

    # Sum contributions for vector entries that are shared across
    # parallel processes
    for b_sub in b.getNestSubVecs():
        b_sub.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)

    # Set Dirichlet boundary condition values in the RHS vector
    bcs0 = fem.bcs_by_block(extract_function_spaces(L), bcs)
    fem.petsc.set_bc_nest(b, bcs0)

    # The pressure field is determined only up to a constant. We supply
    # a vector that spans the nullspace to the solver, and any component
    # of the solution in this direction will be eliminated during the
    # solution process.
    null_vec = fem.petsc.create_vector_nest(L)

    # Set velocity part to zero and the pressure part to a non-zero
    # constant
    null_vecs = null_vec.getNestSubVecs()
    null_vecs[0].set(0.0), null_vecs[1].set(1.0)

    # Normalize the vector that spans the nullspace, create a nullspace
    # object, and attach it to the matrix
    null_vec.normalize()
    nsp = PETSc.NullSpace().create(vectors=[null_vec])
    assert nsp.test(A)
    A.setNullSpace(nsp)

    # Create a MINRES Krylov solver and a block-diagonal preconditioner
    # using PETSc's additive fieldsplit preconditioner
    ksp = PETSc.KSP().create(msh.comm)
    ksp.setOperators(A, P)
    ksp.setType("minres")
    ksp.setTolerances(rtol=1e-9)
    ksp.getPC().setType("fieldsplit")
    ksp.getPC().setFieldSplitType(PETSc.PC.CompositeType.ADDITIVE)

    # Define the matrix blocks in the preconditioner with the velocity
    # and pressure matrix index sets
    nested_IS = P.getNestISs()
    ksp.getPC().setFieldSplitIS(("u", nested_IS[0][0]), ("p", nested_IS[0][1]))

    # Set the preconditioners for each block. For the top-left
    # Laplace-type operator we use algebraic multigrid. For the
    # lower-right block we use a Jacobi preconditioner. By default, GAMG
    # will infer the correct near-nullspace from the matrix block size.
    ksp_u, ksp_p = ksp.getPC().getFieldSplitSubKSP()
    ksp_u.setType("preonly")
    ksp_u.getPC().setType("gamg")
    ksp_p.setType("preonly")
    ksp_p.getPC().setType("jacobi")

    # Create finite element {py:class}`Function <dolfinx.fem.Function>`s
    # for the velocity (on the space `V`) and for the pressure (on the
    # space `Q`). The vectors for `u` and `p` are combined to form a
    # nested vector and the system is solved.
    u, p = Function(V), Function(Q)
    x = PETSc.Vec().createNest([la.create_petsc_vector_wrap(u.x), la.create_petsc_vector_wrap(p.x)])
    ksp.solve(b, x)

    view_flow(V, u, msh)

    # Compute norms of the solution vectors
    norm_u = la.norm(u.x)
    norm_p = la.norm(p.x)
    if MPI.COMM_WORLD.rank == 0:
        print(f"(A) Norm of velocity coefficient vector (nested, iterative): {norm_u}")
        print(f"(A) Norm of pressure coefficient vector (nested, iterative): {norm_p}")

    return norm_u, norm_p

def block_operators():
    """Return block operators and block RHS vector for the Stokes
    problem"""

    # Assemble the matrix operator, preconditioner and RHS vector into
    # single objects, preserving the block structure
    A = assemble_matrix_block(a, bcs=bcs)
    A.assemble()
    P = assemble_matrix_block(a_p, bcs=bcs)
    P.assemble()
    b = assemble_vector_block(L, a, bcs=bcs)

    # Set the nullspace for pressure (since pressure is determined only
    # up to a constant)
    null_vec = A.createVecLeft()
    offset = V.dofmap.index_map.size_local * V.dofmap.index_map_bs
    null_vec.array[offset:] = 1.0
    null_vec.normalize()
    nsp = PETSc.NullSpace().create(vectors=[null_vec])
    assert nsp.test(A)
    A.setNullSpace(nsp)

    return A, P, b


# The following function solves the Stokes problem using a
# block-diagonal preconditioner and monolithic PETSc matrices.


def block_iterative_solver():
    """Solve the Stokes problem using blocked matrices and an iterative
    solver."""

    # Assemble the operators and RHS vector
    A, P, b = block_operators()

    # Build PETSc index sets for each field (global dof indices for each
    # field)
    V_map = V.dofmap.index_map
    Q_map = Q.dofmap.index_map
    offset_u = V_map.local_range[0] * V.dofmap.index_map_bs + Q_map.local_range[0]
    offset_p = offset_u + V_map.size_local * V.dofmap.index_map_bs
    is_u = PETSc.IS().createStride(
        V_map.size_local * V.dofmap.index_map_bs, offset_u, 1, comm=PETSc.COMM_SELF
    )
    is_p = PETSc.IS().createStride(Q_map.size_local, offset_p, 1, comm=PETSc.COMM_SELF)

    # Create a MINRES Krylov solver and a block-diagonal preconditioner
    # using PETSc's additive fieldsplit preconditioner
    ksp = PETSc.KSP().create(msh.comm)
    ksp.setOperators(A, P)
    ksp.setTolerances(rtol=1e-9)
    ksp.setType("minres")
    ksp.getPC().setType("fieldsplit")
    ksp.getPC().setFieldSplitType(PETSc.PC.CompositeType.ADDITIVE)
    ksp.getPC().setFieldSplitIS(("u", is_u), ("p", is_p))

    # Configure velocity and pressure sub-solvers
    ksp_u, ksp_p = ksp.getPC().getFieldSplitSubKSP()
    ksp_u.setType("preonly")
    ksp_u.getPC().setType("gamg")
    ksp_p.setType("preonly")
    ksp_p.getPC().setType("jacobi")

    # The matrix A combines the vector velocity and scalar pressure
    # parts, hence has a block size of 1. Unlike the MatNest case, GAMG
    # cannot infer the correct near-nullspace from the matrix block
    # size. Therefore, we set the block size on the top-left block of
    # the preconditioner so that GAMG can infer the appropriate near
    # nullspace.
    ksp.getPC().setUp()
    Pu, _ = ksp_u.getPC().getOperators()
    Pu.setBlockSize(msh.topology.dim)

    # Create a block vector (x) to store the full solution and solve
    x = A.createVecRight()
    ksp.solve(b, x)

    # Create Functions to split u and p
    u, p = Function(V), Function(Q)
    offset = V_map.size_local * V.dofmap.index_map_bs
    u.x.array[:offset] = x.array_r[:offset]
    p.x.array[: (len(x.array_r) - offset)] = x.array_r[offset:]

    view_flow(V, u, msh)

    # Compute the $L^2$ norms of the solution vectors
    norm_u, norm_p = la.norm(u.x), la.norm(p.x)
    if MPI.COMM_WORLD.rank == 0:
        print(f"(B) Norm of velocity coefficient vector (blocked, iterative): {norm_u}")
        print(f"(B) Norm of pressure coefficient vector (blocked, iterative): {norm_p}")

    return norm_u, norm_p

def block_direct_solver():
    """Solve the Stokes problem using blocked matrices and a direct
    solver."""

    # Assemble the block operator and RHS vector
    A, _, b = block_operators()

    # Create a solver
    ksp = PETSc.KSP().create(msh.comm)
    ksp.setOperators(A)
    ksp.setType("preonly")

    # Use an LU direct solver. SuperLU_DIST is selected here; the MUMPS
    # options for handling the pressure nullspace are left commented
    # out below
    pc = ksp.getPC()
    pc.setType("lu")
    pc.setFactorSolverType("superlu_dist")
    try:
        pc.setFactorSetUpSolverType()
    except PETSc.Error as e:
        if e.ierr == 92:
            print("The required PETSc solver/preconditioner is not available. Exiting.")
            print(e)
            exit(0)
        else:
            raise e
    # pc.getFactorMatrix().setMumpsIcntl(icntl=24, ival=1)  # For pressure nullspace
    # pc.getFactorMatrix().setMumpsIcntl(icntl=25, ival=0)  # For pressure nullspace

    # Create a block vector (x) to store the full solution, and solve
    x = A.createVecLeft()
    ksp.solve(b, x)

    # Create Functions and scatter x solution
    u, p = Function(V), Function(Q)
    offset = V.dofmap.index_map.size_local * V.dofmap.index_map_bs
    u.x.array[:offset] = x.array_r[:offset]
    p.x.array[: (len(x.array_r) - offset)] = x.array_r[offset:]

    view_flow(V, u, msh)

    # Compute the $L^2$ norms of the u and p vectors
    norm_u, norm_p = la.norm(u.x), la.norm(p.x)
    if MPI.COMM_WORLD.rank == 0:
        print(f"(C) Norm of velocity coefficient vector (blocked, direct): {norm_u}")
        print(f"(C) Norm of pressure coefficient vector (blocked, direct): {norm_p}")

    return norm_u, norm_p

def mixed_direct():
    # Create the Taylor-Hood function space
    TH = mixed_element([P2, P1])
    W = functionspace(msh, TH)

    # No slip boundary condition
    W0 = W.sub(0)
    Q, _ = W0.collapse()
    noslip = Function(Q)
    facets = locate_entities_boundary(msh, 1, noslip_boundary)
    dofs = locate_dofs_topological((W0, Q), 1, facets)
    bc0 = dirichletbc(noslip, dofs, W0)

    # Driving velocity condition u = (1, 0) on top boundary (y = 1)
    lid_velocity = Function(Q)
    lid_velocity.interpolate(lid_velocity_expression)
    facets = locate_entities_boundary(msh, 1, lid)
    dofs = locate_dofs_topological((W0, Q), 1, facets)
    bc1 = dirichletbc(lid_velocity, dofs, W0)

    # Collect Dirichlet boundary conditions
    bcs = [bc0, bc1]

    # Define variational problem
    (u, p) = ufl.TrialFunctions(W)
    (v, q) = ufl.TestFunctions(W)
    f = Function(Q)
    a = form((inner(grad(u), grad(v)) + inner(p, div(v)) + inner(div(u), q)) * dx)
    L = form(inner(f, v) * dx)

    # Assemble LHS matrix and RHS vector
    A = fem.petsc.assemble_matrix(a, bcs=bcs)
    A.assemble()
    b = fem.petsc.assemble_vector(L)

    fem.petsc.apply_lifting(b, [a], bcs=[bcs])
    b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)

    # Set Dirichlet boundary condition values in the RHS
    for bc in bcs:
        bc.set(b)

    # Create and configure solver
    ksp = PETSc.KSP().create(msh.comm)
    ksp.setOperators(A)
    ksp.setType("preonly")

    # Configure the direct solver. The MUMPS options for handling the
    # pressure nullspace are commented out; SuperLU_DIST is used instead
    pc = ksp.getPC()
    pc.setType("lu")
    # pc.setFactorSolverType("mumps")
    # pc.setFactorSetUpSolverType()
    # pc.getFactorMatrix().setMumpsIcntl(icntl=24, ival=1)
    # pc.getFactorMatrix().setMumpsIcntl(icntl=25, ival=0)

    pc.setFactorSolverType("superlu_dist")

    # Compute the solution
    U = Function(W)
    try:
        ksp.solve(b, U.x.petsc_vec)
    except PETSc.Error as e:
        if e.ierr == 92:
            print("The required PETSc solver/preconditioner is not available. Exiting.")
            print(e)
            exit(0)
        else:
            raise e

    # Split the mixed solution and collapse
    u, p = U.sub(0).collapse(), U.sub(1).collapse()

    view_flow(V, u, msh)

    # Compute norms
    norm_u, norm_p = la.norm(u.x), la.norm(p.x)
    if MPI.COMM_WORLD.rank == 0:
        print(f"(D) Norm of velocity coefficient vector (monolithic, direct): {norm_u}")
        print(f"(D) Norm of pressure coefficient vector (monolithic, direct): {norm_p}")

    return norm_u, norm_p


# Solve using PETSc MatNest
norm_u_0, norm_p_0 = nested_iterative_solver()

# Solve using PETSc block matrices and an iterative solver
norm_u_1, norm_p_1 = block_iterative_solver()
np.testing.assert_allclose(norm_u_1, norm_u_0, rtol=1e-4)
np.testing.assert_allclose(norm_p_1, norm_p_0, rtol=1e-4)

# Solve using PETSc block matrices and an LU solver
norm_u_2, norm_p_2 = block_direct_solver()
np.testing.assert_allclose(norm_u_2, norm_u_0, rtol=1e-4)
np.testing.assert_allclose(norm_p_2, norm_p_0, rtol=1e-4)

# Solve using a non-blocked matrix and an LU solver
norm_u_3, norm_p_3 = mixed_direct()
np.testing.assert_allclose(norm_u_3, norm_u_0, rtol=1e-4)

Could you elaborate on how you have adapted the demo? Since your example is not minimal it requires quite an effort to parse it.

If your problem has no non-Dirichlet boundaries for the velocity, you will have a pressure nullspace that you must handle. One can configure MUMPS to handle that for you. However, if you use superlu_dist, you have to do it yourself.
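For reference, a sketch of that MUMPS configuration (it mirrors the lines that are commented out in your code; pc is the KSP's preconditioner):

pc.setType("lu")
pc.setFactorSolverType("mumps")
pc.setFactorSetUpSolverType()
pc.getFactorMatrix().setMumpsIcntl(icntl=24, ival=1)  # enable null-pivot detection
pc.getFactorMatrix().setMumpsIcntl(icntl=25, ival=0)  # return a solution of the deficient system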

You have not specified what weird results you are getting. Is the velocity wrong? The pressure? Or both?

You can also set setErrorIfNotConverged to true to ensure that you capture any error messages from the direct solver. See for instance Code search results · GitHub
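In petsc4py that is a one-liner on the KSP object (sketch):

ksp.setErrorIfNotConverged(True)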

You can check if the residual is zero post solving.
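A minimal sketch of such a check with petsc4py, assuming A, b and the solution vector x from ksp.solve(b, x):

r = b.duplicate()
A.mult(x, r)     # r = A x
r.axpy(-1.0, b)  # r = A x - b
rel_res = r.norm() / b.norm()  # collective call; do it on all ranks
if MPI.COMM_WORLD.rank == 0:
    print(f"Relative residual: {rel_res}")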

Thank you for your response!

My adaptation merely makes the demo code run on my machine. Most of the changes simply replace fem.petsc.assemble_matrix(a, bcs=bcs, kind="nest") with fem.petsc.assemble_matrix_nest(a, bcs=bcs) so that the code is compatible with my DOLFINx 0.9.0, together with a helper function view_flow() to visualize the flow velocity with PyVista for inspection.
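As an aside, a hedged sketch of how one could keep a single script working across both versions, assuming the older assemble_matrix simply rejects the kind keyword with a TypeError:

try:
    A = fem.petsc.assemble_matrix(a, bcs=bcs, kind="nest")  # newer DOLFINx API
except TypeError:
    A = fem.petsc.assemble_matrix_nest(a, bcs=bcs)  # DOLFINx 0.9 API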

The results of the demo, plotted with PyVista, are shown below,


which, I think, illustrates the main point of the demo: that we need to handle the pressure nullspace. Thank you for the explanation!

For this, you could simply pick up the version released with 0.9:

Since the issue seems to be the visualization, could you please reduce the example to only the one solver that gives the «weird» solution? I think this is due to your plotter, not the solver.

Could you try storing the solution and visualize it with Paraview?
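For example, a sketch of writing the velocity with XDMFFile (since the velocity is P2 and XDMF stores Lagrange data matching the mesh, it is interpolated onto a P1 vector space first; msh and the collapsed u are names from your script):

from dolfinx import fem, io

V1 = fem.functionspace(msh, ("Lagrange", 1, (msh.geometry.dim,)))
u1 = fem.Function(V1)
u1.interpolate(u)
with io.XDMFFile(msh.comm, "velocity.xdmf", "w") as xdmf:
    xdmf.write_mesh(msh)
    xdmf.write_function(u1)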

Thank you again for the fast reply! The MWE with the mixed direct solver together with the plotter is now below:

from mpi4py import MPI
from petsc4py import PETSc
import numpy as np

import ufl
from dolfinx.fem.petsc import assemble_matrix, assemble_vector, apply_lifting
from basix.ufl import element, mixed_element
from dolfinx import default_real_type, fem
from dolfinx.fem import (
    Function,
    dirichletbc,
    form,
    functionspace,
    locate_dofs_topological,
)
from dolfinx.mesh import CellType, create_rectangle, locate_entities_boundary
from dolfinx.plot import vtk_mesh
from ufl import div, dx, grad, inner

import pyvista

opts = PETSc.Options()
opts["mat_superlu_dist_iterrefine"] = True

def view_flow(V, u, msh):

    # pyvista.start_xvfb()
    topology, cell_types, geometry = vtk_mesh(V)
    values = np.zeros((geometry.shape[0], 3), dtype=np.float64)
    values[:, :len(u)] = u.x.array.real.reshape((geometry.shape[0], len(u)))

    # Create a point cloud of glyphs
    function_grid = pyvista.UnstructuredGrid(topology, cell_types, geometry)
    function_grid["u"] = values
    glyphs = function_grid.glyph(orient="u", factor=0.2)

    # Create a pyvista-grid for the mesh
    msh.topology.create_connectivity(msh.topology.dim, msh.topology.dim)
    grid = pyvista.UnstructuredGrid(*vtk_mesh(msh, msh.topology.dim))

    # Create plotter
    plotter = pyvista.Plotter()
    plotter.add_mesh(grid, style="wireframe", color="k")
    plotter.add_mesh(glyphs)
    plotter.view_xy()
    if not pyvista.OFF_SCREEN:
        plotter.save_graphic("glyphs.pdf")
        plotter.show()
    else:
        fig_as_array = plotter.screenshot("glyphs.png")

# +
# Create mesh
msh = create_rectangle(
    MPI.COMM_WORLD, [np.array([0, 0]), np.array([1, 1])], [32, 32], CellType.triangle
)

P2 = element("Lagrange", msh.basix_cell(), 2, shape=(msh.geometry.dim,), dtype=default_real_type)
P1 = element("Lagrange", msh.basix_cell(), 1, dtype=default_real_type)

# Create the Taylor-Hood function space
TH = mixed_element([P2, P1])
W = functionspace(msh, TH)

# Boundary conditions for the velocity field are defined:

# Function to mark x = 0, x = 1 and y = 0
def noslip_boundary(x):
    return np.isclose(x[0], 0.0) | np.isclose(x[0], 1.0) | np.isclose(x[1], 0.0)

# Function to mark the lid (y = 1)
def lid(x):
    return np.isclose(x[1], 1.0)

# Lid velocity
def lid_velocity_expression(x):
    return np.stack((np.ones(x.shape[1]), np.zeros(x.shape[1])))

# No slip boundary condition
W0 = W.sub(0)
Q, _ = W0.collapse()
noslip = Function(Q)
facets = locate_entities_boundary(msh, 1, noslip_boundary)
dofs = locate_dofs_topological((W0, Q), 1, facets)
bc0 = dirichletbc(noslip, dofs, W0)

# Driving velocity condition u = (1, 0) on top boundary (y = 1)
lid_velocity = Function(Q)
lid_velocity.interpolate(lid_velocity_expression)
facets = locate_entities_boundary(msh, 1, lid)
dofs = locate_dofs_topological((W0, Q), 1, facets)
bc1 = dirichletbc(lid_velocity, dofs, W0)

# Collect Dirichlet boundary conditions
bcs = [bc0, bc1]

# Define variational problem
(u, p) = ufl.TrialFunctions(W)
(v, q) = ufl.TestFunctions(W)
f = Function(Q)
a = form((inner(grad(u), grad(v)) + inner(p, div(v)) + inner(div(u), q)) * dx)
L = form(inner(f, v) * dx)

# Assemble LHS matrix and RHS vector
A = assemble_matrix(a, bcs=bcs)
A.assemble()
b = assemble_vector(L)

apply_lifting(b, [a], bcs=[bcs])
b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)

# Set Dirichlet boundary condition values in the RHS
for bc in bcs:
    bc.set(b)

# Create and configure solver
ksp = PETSc.KSP().create(msh.comm)
ksp.setOperators(A)
ksp.setType("preonly")

# Configure the direct solver. The MUMPS options for handling the
# pressure nullspace are commented out; SuperLU_DIST is used instead
pc = ksp.getPC()
pc.setType("lu")
# pc.setFactorSolverType("mumps")
# pc.setFactorSetUpSolverType()
# pc.getFactorMatrix().setMumpsIcntl(icntl=24, ival=1)
# pc.getFactorMatrix().setMumpsIcntl(icntl=25, ival=0)

pc.setFactorSolverType("superlu_dist")

# Compute the solution
U = Function(W)
ksp.solve(b, U.x.petsc_vec)

# Split the mixed solution and collapse
u, p = U.sub(0).collapse(), U.sub(1).collapse()

view_flow(Q, u, msh)

Now it no longer shows the «weird» solution.


I realized that the problem is, as you pointed out, with the plotter: in view_flow(Q, u, msh), Q needs to be the correct collapsed subspace of the mixed-element formulation, Q, _ = W.sub(0).collapse(). I had been mistaking it for the separate function space V used by the other solvers. Thank you so much for the help!
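For future readers, a small sketch of a pattern that avoids the mistake altogether: pass the function's own space to the plot helper, so the dof layout and the VTK geometry are guaranteed to match.

u = U.sub(0).collapse()
view_flow(u.function_space, u, msh)  # u.function_space is the collapsed velocity subspace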