Hello everyone,
I tried to solve the time-distributed control demo with the IPOPT solver in parallel using MPI, and it didn't work.
Usually (or at least with L-BFGS-B) I can run any serial dolfin-adjoint program with MPI simply by launching it with mpirun -n
…
For instance, running the following MWE with mpirun -n 2 python3 works just fine:
from fenics import *
from fenics_adjoint import *
from collections import OrderedDict
set_log_active(False)
import numpy as np

data = Constant(2)
nu = Constant(1e-5)
dt = Constant(0.1)
T = 2
PDEs_numb = 1
numDataPerRank = int(PDEs_numb)
alphas = np.ones(PDEs_numb)
my_alpha = alphas

nx = ny = 20
mesh = UnitSquareMesh(nx, ny)
V = FunctionSpace(mesh, "CG", 1)

ctrls = OrderedDict()
t = float(dt)
while t <= T:
    ctrls[t] = Function(V)
    t += float(dt)

def poisson(alpha, u_0, f):
    u, v = TrialFunction(V), TestFunction(V)
    bc = DirichletBC(V, Constant(0), 'on_boundary')
    a = (u*v + dt*inner(Constant(alpha)*nu*grad(u), grad(v)))*dx
    L = (u_0 + dt*f)*v*dx
    uh = Function(V)
    solve(a == L, uh, bc)
    return uh

def solve_heat(ctrls):
    u_0 = []
    f = Function(V, name="source")
    d = Function(V, name="data")
    d.assign(project(data, V))
    t = float(dt)
    j = 0
    for i in range(numDataPerRank):
        u_0.append(Function(V, name="solution"))
        j += 0.5 * float(dt) * assemble((u_0[i] - d) ** 2 * dx)
    while t <= T:
        f.assign(ctrls[t])
        for i in range(numDataPerRank):
            u_0[i] = poisson(my_alpha[i], u_0[i], f)
            j += 0.5 * float(dt) * assemble((u_0[i] - d) ** 2 * dx)
        t += float(dt)
    return u_0, d, j

u, d, j = solve_heat(ctrls)

alpha1 = Constant(1e-5)
regularisation = alpha1/2*sum([1/dt*(fb - fa)**2*dx for fb, fa in
                               zip(list(ctrls.values())[1:], list(ctrls.values())[:-1])])
J = j + assemble(regularisation)

m = [Control(c) for c in ctrls.values()]
rf = ReducedFunctional(J, m)
opt_ctrls = minimize(rf, options={"maxiter": 3, "disp": True})
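To make sure the run really is distributed over the two processes, one can print the MPI rank and the dof range each process owns; this is only a small check, a minimal sketch assuming the MPI.comm_world interface of recent dolfin versions:

from dolfin import MPI

comm = MPI.comm_world
# Each rank reports which slice of the dofs of V it owns; with
# mpirun -n 2 the two ranges should roughly split the mesh in half.
first_dof, last_dof = V.dofmap().ownership_range()
print("rank %d of %d owns dofs [%d, %d)" % (comm.rank, comm.size, first_dof, last_dof))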
Solving the same problem (MWE below), also on 2 processes (mpirun -n 2 python3), but with the IPOPT solver, it freezes at the first iteration. When I run it on a single process (mpirun -n 1 python3) or in serial (python3), it works fine though.
from fenics import *
from fenics_adjoint import *
from collections import OrderedDict
set_log_active(False)
import numpy as np

try:
    from pyadjoint import ipopt  # noqa: F401
except ImportError:
    print("""This example depends on IPOPT and Python ipopt bindings. \
When compiling IPOPT, make sure to link against HSL, as it \
is a necessity for practical problems.""")
    raise

data = Constant(2)
nu = Constant(1e-5)
dt = Constant(0.1)
T = 2
PDEs_numb = 1
numDataPerRank = int(PDEs_numb)
alphas = np.ones(PDEs_numb)
my_alpha = alphas

nx = ny = 20
mesh = UnitSquareMesh(nx, ny)
V = FunctionSpace(mesh, "CG", 1)

ctrls = OrderedDict()
t = float(dt)
while t <= T:
    ctrls[t] = Function(V)
    t += float(dt)

def poisson(alpha, u_0, f):
    u, v = TrialFunction(V), TestFunction(V)
    bc = DirichletBC(V, Constant(0), 'on_boundary')
    a = (u*v + dt*inner(Constant(alpha)*nu*grad(u), grad(v)))*dx
    L = (u_0 + dt*f)*v*dx
    uh = Function(V)
    solve(a == L, uh, bc)
    return uh

def solve_heat(ctrls):
    u_0 = []
    f = Function(V, name="source")
    d = Function(V, name="data")
    d.assign(project(data, V))
    t = float(dt)
    j = 0
    for i in range(numDataPerRank):
        u_0.append(Function(V, name="solution"))
        j += 0.5 * float(dt) * assemble((u_0[i] - d) ** 2 * dx)
    while t <= T:
        f.assign(ctrls[t])
        for i in range(numDataPerRank):
            u_0[i] = poisson(my_alpha[i], u_0[i], f)
            j += 0.5 * float(dt) * assemble((u_0[i] - d) ** 2 * dx)
        t += float(dt)
    return u_0, d, j

u, d, j = solve_heat(ctrls)

alpha1 = Constant(1e-5)
regularisation = alpha1/2*sum([1/dt*(fb - fa)**2*dx for fb, fa in
                               zip(list(ctrls.values())[1:], list(ctrls.values())[:-1])])
J = j + assemble(regularisation)

m = [Control(c) for c in ctrls.values()]
rf = ReducedFunctional(J, m)
problem = MinimizationProblem(rf)
parameters = {"acceptable_tol": 1.0e-3, "maximum_iterations": 3}
solver = IPOPTSolver(problem, parameters=parameters)
opt_ctrls = solver.solve()
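If it helps with diagnosing, here is how I would try to get more output from IPOPT to see where the parallel run stalls; a minimal sketch, assuming the remaining entries of the parameters dictionary are passed straight through to IPOPT as options:

# Hypothetical diagnostic run with higher IPOPT verbosity
# (print_level is a standard IPOPT option, 0-12); everything else unchanged.
parameters = {"acceptable_tol": 1.0e-3,
              "maximum_iterations": 3,
              "print_level": 6}
solver = IPOPTSolver(problem, parameters=parameters)
opt_ctrls = solver.solve()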
Am I missing something? Or does IPOPT just not support MPI?
Best regards