Implementing the discrete weak gradient operators

Dear FEniCS community,

I am a beginner in FEniCS. I have been trying to solve a minimization problem derived from the LDG (local discontinuous Galerkin) method, but I am having difficulty solving for the descent direction; see the linked Paper.

In this scheme, however, discrete weak gradient operators on the DG spaces are needed.

Here is my MWE:

# p=2 case
from fenics import *
from dolfin import *
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
import math

# --- problem setup --------------------------------------------------------
# NOTE(review): `size`, `pdeg` and `p` were used but never defined in the
# original MWE (NameError at runtime); define them here so the script runs.
size = 16   # number of cells per edge of the unit-square mesh
pdeg = 2    # polynomial degree of the DG spaces
p = 2       # p-Laplacian exponent used by nonlinear() (the "p=2 case")

mesh = RectangleMesh(Point(0, 0), Point(1, 1), size, size)
# Manufactured right-hand side f = -laplace(sin(pi x) sin(pi y)).
f = Expression(" 2*pow(pi,2)*sin(pi*x[0])*sin(pi*x[1])", degree=5, pi=math.pi)


# Mixed DG space W = P1 x P2 x P3: two vector-valued DG spaces (weak
# gradient and auxiliary flux) plus one scalar DG space.
P1 = VectorElement("DG", triangle, pdeg)
P2 = VectorElement("DG", triangle, pdeg)
P3 = FiniteElement("DG", triangle, pdeg)
P1P2P3 = MixedElement([P1, P2, P3])
W = FunctionSpace(mesh, P1P2P3)

# Stand-alone subspaces used for projections and the final solve.
V1 = FunctionSpace(mesh, P1)
V2 = FunctionSpace(mesh, P2)
V3 = FunctionSpace(mesh, P3)

n = FacetNormal(mesh)
h = CellDiameter(mesh)
h_avg = (h('+') + h('-'))/2  # average cell diameter across interior facets

# norm
# norm
def vecnorm(u):
  """Pointwise Euclidean norm |u| = sqrt(u . u) of a vector expression."""
  return inner(u, u) ** 0.5

def scalarnorm(u):
  """Pointwise absolute value of a scalar expression, written as sqrt(u^2)."""
  return (u ** 2) ** 0.5

# DG discrete weak gradient operators
def Discrete_weak_gradient(v,g):
  """Compute the DG discrete weak gradient of the scalar field `v`.

  Solves the auxiliary mixed problem on W and returns its first (vector)
  component q, i.e. the LDG lifting of v with Dirichlet boundary data g.

  NOTE(review): `v` is projected into V3 before use, so passing a
  TrialFunction/TestFunction here loses its trial/test character — the
  returned q is a plain Function, not an operator acting on v.  This is
  presumably why the final linear solve fails; confirm against the paper.
  """
  (q,sigma,uu) = TrialFunctions(W)
  (zeta,tau,vv) = TestFunctions(W)
  v = project(v,V3) # function
  g = project(g,V3) # boundary
  C12 = Constant((0,0))  # LDG switch/stabilization vector (zero here)
  bc = DirichletBC(W.sub(2), g, "on_boundary")

  # Mass form on the gradient component.
  L = inner(q,zeta)*dx

  # Weak (distributional) gradient of v: element integral plus
  # interior-facet (dS) and boundary (ds) terms.
  R = -v*div(zeta)*dx + avg(v)*inner(jump(zeta),n('+'))*dS\
    + inner(C12,jump(v,n))*inner(jump(zeta),n('+'))*dS + g*inner(zeta,n)*ds

  # Re-split so `solve` receives a proper bilinear/linear pair.
  F = L - R
  L = lhs(F)
  R = rhs(F)

  w = Function(W)
  solve(L==R,w,bc)
  (q,sigma,u) = w.split()

  return q

# nonlinear coefficient
def nonlinear(vec, p=2):
  """p-Laplacian coefficient |vec|^(p-2) * vec.

  The original relied on a global `p` that was never defined (NameError);
  it is now a keyword argument defaulting to 2 (the "p=2 case" of this MWE),
  which keeps every existing call `nonlinear(vec)` working unchanged.
  """
  return pow(vecnorm(vec), p - 2) * vec


# derivative of the energy functional
def dJudv_fn(u,g,v,alpha,Dwg1,Dwg2): #v function or testfunction
  """Gateaux derivative dJ(u)[v] of the LDG energy functional as a UFL form.

  u     : current iterate
  g     : Dirichlet boundary data
  v     : direction (Function or TestFunction)
  alpha : penalty parameter
  Dwg1  : discrete weak gradient of u
  Dwg2  : discrete weak gradient of v

  A dead assignment (`dJudv_fn = f*v*dx`, immediately overwritten in the
  original) was removed; the returned form is unchanged.
  """
  alpha = Constant(alpha)
  nonlinear1 = nonlinear(Dwg1)
  nonlinear2 = nonlinear((1/h_avg)*jump(u,n))  # interior-facet penalty term
  nonlinear3 = nonlinear((1/h)*(u-g)*n)        # boundary penalty term
  return inner(nonlinear1,Dwg2)*dx + alpha*inner(nonlinear2,jump(v,n))*dS\
    + alpha*inner(nonlinear3,v*n)*ds - f*v*dx

# solve the direction
alpha = 1.0

# Placeholder Functions; w and v are rebound to Trial/Test functions below.
w = Function(V3)
u = Function(V3)
v = Function(V3)
u0 = project(Expression("(x[0]-1)*x[0]*(x[1]-1)*x[1]", degree=5), V3)

# NOTE(review): v and w are zero Functions at this point, so Dwg_v_0 and
# Dwg_w_0 come back as plain (zero) Functions — they carry no trial/test
# dependence.  The forms built from them below therefore do not actually
# depend on the TrialFunction w or TestFunction v through the gradient
# terms, which is presumably why solve() fails here but works with grad().
Dwg_u0_g = Discrete_weak_gradient(u0,Constant(0.0))
Dwg_v_0 = Discrete_weak_gradient(v,Constant(0.0))
Dwg_w_0 = Discrete_weak_gradient(w,Constant(0.0))

w = TrialFunction(V3)
v = TestFunction(V3)


# Bilinear form of the (preconditioned) descent-direction problem.
weighted = inner(Dwg_w_0,Dwg_v_0)*dx\
       + Constant(alpha)*(1/h_avg)*inner(jump(w,n),jump(v,n))*dS\
       + Constant(alpha)*(1/h)*w*v*ds

# Linear functional: derivative of J at u0 in direction v.
dJudu = dJudv_fn(u0,Constant(0.0),v,alpha,Dwg_u0_g,Dwg_v_0)

ww = Function(V3)
bc = DirichletBC(V3,Constant(0.0), "on_boundary")
solve( weighted==-dJudu , ww, bc)

error message:

When I use grad() instead of Discrete_weak_gradient(), the code works.
I think the issue is that Dwg_v_0 is not a test function (it is a plain Function returned by the auxiliary solve), but I am not sure how to fix this in FEniCS.