Hello,
To carry out topology optimisation simulations in fluid mechanics with the density-based method, I need to change the value of q over the iterations; q is used in the k function:
def k(alpha):
    return k_max + (k_min - k_max) * alpha * (1. + q) / (alpha + q)
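(With this interpolation, k(0) = k_max and k(1) = k_min; q controls the convexity: large q makes k almost linear in alpha, while small q pushes intermediate densities towards k_min.)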
However, I can’t get the optimisation to take the change in the value of q into account: I get exactly the same result as when q is left unchanged. I also tried defining k as def k(alpha, q), but without success.
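That two-argument variant looked roughly like this (same k_min and k_max as above), with q then passed explicitly wherever k is used:

def k(alpha, q):
    return k_max + (k_min - k_max) * alpha * (1. + q) / (alpha + q)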
Here is part of the code:
# Initial setup for topology optimization
alpha = interpolate(Constant(f_V-0.01), A) # Initial guess for the design variable
set_working_tape(Tape()) # Clear all annotations and restart the adjoint model
# Solve the simulation
u = solve_forward_problem(alpha)
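# For reference, solve_forward_problem is essentially the following sketch
# (a Stokes-type solve with the Brinkman penalisation term; the mixed space W,
# the boundary conditions bcs and the exact variational form are defined
# earlier in my code):
#
#     def solve_forward_problem(alpha):
#         u = Function(W)                     # mixed (velocity, pressure) unknown
#         v, p = split(u)
#         w, r = TestFunctions(W)
#         F = (mu_ * inner(grad(v) + grad(v).T, grad(w))
#              - p * div(w) + r * div(v)
#              + inner(k(alpha) * v, w)) * dx  # Brinkman term k(alpha)*v
#         solve(F == 0, u, bcs)
#         return u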
# Visualization files
alpha_pvd_file = File("%s/alpha_iterations.pvd" % (output_folder))
alpha_viz = Function(A, name="AlphaVisualisation")
dj_pvd_file = File("%s/dj_iterations.pvd" % (output_folder))
dj_viz = Function(A, name="dJVisualisation")
# Callback during topology optimization
global current_iteration
current_iteration = 0
def derivative_cb_post(j, dj, current_alpha):
    global current_iteration, q
    if current_iteration >= 10:
        q = 0.1  # attempt to update the penalisation parameter q during the optimisation
    print("\n [Iteration: %d] J = %1.7e, q = %1.2f\n" % (current_iteration, j, q))
    obj.append(j)
    ité.append(current_iteration)
    current_iteration += 1
    # Save for visualization
    alpha_viz.assign(current_alpha)
    alpha_pvd_file << alpha_viz
    dj_viz.assign(dj)
    dj_pvd_file << dj_viz
if current_iteration >= 10:  # second attempt: update q before assembling the objective
    q = 0.1
# Objective function: viscous dissipation plus the Brinkman penalisation term
u_split = split(u)
v = u_split[0]
J = assemble((1/2.*(mu_)*inner(grad(v) + grad(v).T, grad(v) + grad(v).T)
              + inner(k(alpha) * v, v))*dx)
#J = assemble(w_ud*dot(v - u_target, v - u_target) * dx(6) + inner(k(alpha) * v, v)*dx)
print(" Current objective function value: %1.7e" % (J))
# Set the topology optimization problem and solver
alpha_C = Control(alpha)
Jhat = ReducedFunctional(J, alpha_C, derivative_cb_post=derivative_cb_post)
problem_min = MinimizationProblem(Jhat, bounds=(0.0, 1.0),
                                  constraints=[UFLInequalityConstraint((f_V - alpha)*dx, alpha_C)])
solver_opt = IPOPTSolver(problem_min, parameters={'maximum_iterations': 50})
# Perform topology optimization
alpha_opt = solver_opt.solve()
alpha.assign(alpha_opt)
alpha_viz.assign(alpha)
alpha_pvd_file << alpha_viz
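To be explicit about what I am trying to achieve: the effect would be equivalent to the continuation sketch below, but ideally within a single IPOPT run. The q schedule here is just an example, and the callback is omitted for brevity:

for q_value in [0.01, 0.1]:
    q = q_value                    # updated penalisation parameter
    set_working_tape(Tape())       # restart annotation so the new q is recorded
    u = solve_forward_problem(alpha)
    v = split(u)[0]
    J = assemble((1/2.*(mu_)*inner(grad(v) + grad(v).T, grad(v) + grad(v).T)
                  + inner(k(alpha) * v, v))*dx)
    alpha_C = Control(alpha)
    Jhat = ReducedFunctional(J, alpha_C)
    problem_min = MinimizationProblem(Jhat, bounds=(0.0, 1.0),
                                      constraints=[UFLInequalityConstraint((f_V - alpha)*dx, alpha_C)])
    solver_opt = IPOPTSolver(problem_min, parameters={'maximum_iterations': 50})
    alpha.assign(solver_opt.solve())  # warm start for the next value of q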
Thank you!