The mesh has to be associated with the checkpoint file. You are currently not doing that.
Note that I just made a release (v0.7.2, as of 10 minutes ago) that supports storing multiple functions and multiple time steps in a single checkpoint.
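Concretely, the fix is to write the mesh to the same `.bp` file before writing the function, so that the function data can be mapped back onto the mesh's dof layout on read. A minimal sketch of that pattern (the `time` keyword reflects the new time-stepping support; treat the exact keyword name as an assumption and check the release notes):

```python
from pathlib import Path
from mpi4py import MPI
import dolfinx
import adios4dolfinx

mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 10, 10)
V = dolfinx.fem.functionspace(mesh, ("Lagrange", 3))
u = dolfinx.fem.Function(V)
u.interpolate(lambda x: x[0] ** 2 + x[1])

filename = Path("checkpoint.bp")
# Associate the checkpoint file with the mesh before writing any function to it
adios4dolfinx.write_mesh(mesh, filename, engine="BP4")
# time= is assumed here to tag the function with a time stamp (v0.7.2 feature)
adios4dolfinx.write_function(u, filename, engine="BP4", time=0.0)
```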
I will try to illustrate how I would use adios4dolfinx on a slightly smaller example, using ipyparallel to run the writing and the reading on different sets of processes.
See Demo of capabilities · Issue #54 · jorgensd/adios4dolfinx · GitHub, or the full example below:
"""Usage of ADIOS4DOLFINx
Author: Jørgen S. Dokken
SPDX-License-Identifier: MIT
"""
import ipyparallel as ipp
from pathlib import Path
from typing import Tuple, Callable
import numpy as np
def f(x):
    return x[0] ** 2 + x[1]
checkpoint_0 = Path("checkpoint_0.bp")
checkpoint_1 = Path("checkpoint_1.bp")
el = ("Lagrange", 3)
def create_mesh(path: Path, N: int):
    """
    Given a path and a number of elements in each direction, create a mesh and write it to file
    """
    from mpi4py import MPI
    import dolfinx
    import adios4dolfinx

    mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, N, N)
    adios4dolfinx.write_mesh(mesh, path, engine="BP4")
    print(f"Mesh written to {path} on comm {mesh.comm.rank+1}/{mesh.comm.size}", flush=True)
cluster = ipp.Cluster(engines="mpi", n=3)
rc = cluster.start_and_connect_sync()
# This has started three engines that can execute code with an MPI communicator.
# We create a mesh and write a checkpoint with 3 processes.
query = rc[:].apply_async(create_mesh, checkpoint_0, 10)
query.wait()
assert query.successful(), query.error
for msg in query.stdout:
print(msg)
cluster.stop_cluster_sync()
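# Note: each ipp.Cluster spawns a fresh set of MPI processes, so below the
# checkpoint is read back with a different number of ranks (and thus a
# different partitioning) than it was written with; adios4dolfinx takes care
# of redistributing the data.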
# Next we create a new cluster with 2 processors and read the checkpoint
def read_mesh_and_create_function(path: Path, path_out: Path, el: Tuple[str, int], f: Callable[[np.ndarray], np.ndarray]):
    """
    Given a mesh input file, read the mesh, interpolate f into the function space defined
    by el, and write the mesh and function to a new file.
    """
    from mpi4py import MPI
    import dolfinx
    import adios4dolfinx

    mesh = adios4dolfinx.read_mesh(
        MPI.COMM_WORLD, path, engine="BP4", ghost_mode=dolfinx.mesh.GhostMode.shared_facet)
    print(f"Mesh read from {path} on comm {mesh.comm.rank+1}/{mesh.comm.size}", flush=True)
    V = dolfinx.fem.functionspace(mesh, el)
    u = dolfinx.fem.Function(V)
    u.interpolate(f)
    # The mesh must be written to the output file before the function,
    # so that the function is associated with it
    adios4dolfinx.write_mesh(mesh, path_out, engine="BP4")
    adios4dolfinx.write_function(u, path_out, engine="BP4")
    print(f"Function written to {path_out} on comm {mesh.comm.rank+1}/{mesh.comm.size}", flush=True)
cluster_2 = ipp.Cluster(engines="mpi", n=2)
rc_2 = cluster_2.start_and_connect_sync()
query_2 = rc_2[:].apply_async(
read_mesh_and_create_function, checkpoint_0, checkpoint_1, el, f)
query_2.wait()
assert query_2.successful(), query_2.error
for msg in query_2.stdout:
print(msg)
cluster_2.stop_cluster_sync()
def read_and_compare(path: Path, el: Tuple[str, int], f: Callable[[np.ndarray], np.ndarray]):
    """
    Read the function from file and compare it with the analytical input solution
    """
    from mpi4py import MPI
    import dolfinx
    import adios4dolfinx
    import numpy as np

    mesh = adios4dolfinx.read_mesh(
        MPI.COMM_WORLD, path, engine="BP4", ghost_mode=dolfinx.mesh.GhostMode.shared_facet)
    print(f"Mesh read from {path} on comm {mesh.comm.rank+1}/{mesh.comm.size}", flush=True)
    V = dolfinx.fem.functionspace(mesh, el)
    u_ex = dolfinx.fem.Function(V)
    u_ex.interpolate(f)
    u = dolfinx.fem.Function(V)
    adios4dolfinx.read_function(u, path, engine="BP4")
    print(f"Function read from {path} on comm {mesh.comm.rank+1}/{mesh.comm.size}", flush=True)
    np.testing.assert_allclose(u.x.array, u_ex.x.array, atol=1e-15)
    print(f"Assertion passed on comm {mesh.comm.rank+1}/{mesh.comm.size}", flush=True)
# Finally, we read the mesh and function on 4 processes and compare with the analytical solution
cluster_3 = ipp.Cluster(engines="mpi", n=4)
rc_3 = cluster_3.start_and_connect_sync()
query_3 = rc_3[:].apply_async(read_and_compare, checkpoint_1, el, f)
query_3.wait()
assert query_3.successful(), query_3.error
for msg in query_3.stdout:
print(msg)
cluster_3.stop_cluster_sync()
Running the script returns:
Mesh written to checkpoint_0.bp on comm 1/3
Mesh written to checkpoint_0.bp on comm 2/3
Mesh written to checkpoint_0.bp on comm 3/3
Mesh read from checkpoint_0.bp on comm 1/2
Function written to checkpoint_1.bp on comm 1/2
Mesh read from checkpoint_0.bp on comm 2/2
Function written to checkpoint_1.bp on comm 2/2
Mesh read from checkpoint_1.bp on comm 1/4
Function read from checkpoint_1.bp on comm 1/4
Assertion passed on comm 1/4
Mesh read from checkpoint_1.bp on comm 2/4
Function read from checkpoint_1.bp on comm 2/4
Assertion passed on comm 2/4
Mesh read from checkpoint_1.bp on comm 3/4
Function read from checkpoint_1.bp on comm 3/4
Assertion passed on comm 3/4
Mesh read from checkpoint_1.bp on comm 4/4
Function read from checkpoint_1.bp on comm 4/4
Assertion passed on comm 4/4
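For a time-dependent simulation like yours the same pattern applies: write the mesh once, then write the function after every step. A rough sketch, again assuming the v0.7.2 `time` keyword:

```python
from pathlib import Path
from mpi4py import MPI
import dolfinx
import adios4dolfinx

mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 10, 10)
V = dolfinx.fem.functionspace(mesh, ("Lagrange", 1))
uh = dolfinx.fem.Function(V)

filename = Path("time_series.bp")
adios4dolfinx.write_mesh(mesh, filename, engine="BP4")  # once, before any function

t, dt = 0.0, 0.1
for _ in range(10):
    t += dt
    # ... update uh by solving your problem at time t ...
    adios4dolfinx.write_function(uh, filename, engine="BP4", time=t)
```

When reading back, you would then pass the matching time stamp to `adios4dolfinx.read_function` to select the step you want.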