Commits (30)

73ff21a: change `get_free_values` by `get_free_dof_values` (tmigot, Nov 5, 2021)
1785916: replace `get_cell_shapefuns_trial` by `get_trial_fe_basis` (tmigot, Nov 5, 2021)
b062433: rename `get_cell_shapefuns` by `get_fe_basis` (tmigot, Nov 5, 2021)
9678bf3: add spaces in `collect_cell_matrix` (tmigot, Nov 5, 2021)
8c1c0a5: update gradient objective (tmigot, Nov 5, 2021)
54ef405: add space in `collect_cell_vector` (tmigot, Nov 5, 2021)
267ce94: use grad of jac (tmigot, Nov 5, 2021)
73b8850: change modelling `dt` (tmigot, Nov 5, 2021)
d94028a: rmv `BlockArrayCOO` (tmigot, Nov 5, 2021)
458ee69: patch for unconstrained problems (tmigot, Nov 6, 2021)
6590877: fix merge (tmigot, Jan 4, 2022)
8326de5: unconstrained works with Gridap 0.17.8 (tmigot, Jan 4, 2022)
42eb6a2: fix lagrangian hessian structure (tmigot, Jan 5, 2022)
1577da8: fix the jacobian build (tmigot, Jan 5, 2022)
8236664: fix hessian computation (tmigot, Jan 5, 2022)
8dfb474: fix gradient with y and u (tmigot, Jan 6, 2022)
27af76c: rmv LowerTriangular for jac (tmigot, Jan 6, 2022)
880b7cd: update `dt` (tmigot, Jan 6, 2022)
e6877a9: tempory fix `_jacobian2` (tmigot, Jan 6, 2022)
c046b80: fix _get_cell_dofs_field_offsets not defined (tmigot, Jan 7, 2022)
70e41f0: tailored hessian for singlefield function (tmigot, Jan 7, 2022)
0d10210: fix `poissonmixed2` models and add `trialspace` check (tmigot, Jan 7, 2022)
aef29fa: add `_to_multifieldfespace` function (tmigot, Jan 8, 2022)
34752b0: add `AffineFEOperatorControl` (tmigot, Jan 8, 2022)
4a528b1: update tested list [to be squashed] (tmigot, Jan 8, 2022)
05e538c: fix the bug with the jacobian w.r.t. x (tmigot, Jan 27, 2022)
67218a2: split the jacobian rows in three (k, y, u) (tmigot, Jan 27, 2022)
e1c5100: optimize jacobian computation (tmigot, Jan 27, 2022)
9599675: split the hessian structure (tmigot, Jan 27, 2022)
2fbe758: fix type in `sparse` allocation, and use gridap official (tmigot, Feb 14, 2022)

Project.toml (2 changes: 1 addition & 1 deletion)

@@ -14,7 +14,7 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[compat]
FastClosures = "0.2, 0.3"
ForwardDiff = "0.9.0, 0.10.0"
Gridap = "0.15.5"
Gridap = "^0.17.8"
NLPModels = "0.15, 0.16, 0.17, 0.18"
julia = "^1.6.0"

src/GridapPDENLPModel.jl (118 changes: 101 additions & 17 deletions)

@@ -16,10 +16,20 @@ The following arguments are accepted:
- nvar_con : number of dofs in the control functions
- nparam : number of real unknowns
- nnzh_obj : number of nonzeros elements in the objective hessian
- Hrows : store the structure for the hessian of the lagrangian
- Hcols : store the structure for the hessian of the lagrangian
- Jrows : store the structure for the hessian of the jacobian
- Jcols : store the structure for the hessian of the jacobian
- Hobjkrows : store the structure for the hessian of the objective w.r.t. k
- Hobjkcols : store the structure for the hessian of the objective w.r.t. k
- Hobjrows : store the structure for the hessian of the objective
- Hobjcols : store the structure for the hessian of the objective
- Hkrows : store the structure for the hessian of the constraints w.r.t. k
- Hkcols : store the structure for the hessian of the constraints w.r.t. k
- Hrows : store the structure for the hessian of the constraints
- Hcols : store the structure for the hessian of the constraints
- Jkrows : store the structure for the jacobian of the constraints w.r.t. k
- Jkcols : store the structure for the jacobian of the constraints w.r.t. k
- Jyrows : store the structure for the jacobian of the constraints w.r.t. y
- Jycols : store the structure for the jacobian of the constraints w.r.t. y
- Jurows : store the structure for the jacobian of the constraints w.r.t. u
- Jucols : store the structure for the jacobian of the constraints w.r.t. u
"""
struct PDENLPMeta{NRJ <: AbstractEnergyTerm, Op <: Union{FEOperator, Nothing}}
# For the objective function
@@ -42,10 +52,20 @@ struct PDENLPMeta{NRJ <: AbstractEnergyTerm, Op <: Union{FEOperator, Nothing}}
nnzh_obj::Int

# store the structure for hessian and jacobian matrix
Hobjkrows::AbstractVector{Int}
Hobjkcols::AbstractVector{Int}
Hobjrows::AbstractVector{Int}
Hobjcols::AbstractVector{Int}
Hkrows::AbstractVector{Int}
Hkcols::AbstractVector{Int}
Hrows::AbstractVector{Int}
Hcols::AbstractVector{Int}
Jrows::AbstractVector{Int}
Jcols::AbstractVector{Int}
Jkrows::AbstractVector{Int}
Jkcols::AbstractVector{Int}
Jyrows::AbstractVector{Int}
Jycols::AbstractVector{Int}
Jurows::AbstractVector{Int}
Jucols::AbstractVector{Int}
end

struct PDEWorkspace{T, S, M}
@@ -187,9 +207,35 @@ function hess_coord!(
luh = _obj_integral(nlp.pdemeta.tnrj, κ, yu)
lag_hess = Gridap.FESpaces._hessian(x -> _obj_integral(nlp.pdemeta.tnrj, κ, x), yu, luh)

matdata = Gridap.FESpaces.collect_cell_matrix(lag_hess)
matdata = Gridap.FESpaces.collect_cell_matrix(nlp.pdemeta.Y, nlp.pdemeta.X, lag_hess)
assem = SparseMatrixAssembler(nlp.pdemeta.Y, nlp.pdemeta.X)
nini = fill_hess_coo_numeric!(vals, assem, matdata, n = nini)
###############################################################
# TO BE IMPROVED

m1 = Gridap.FESpaces.nz_counter(
Gridap.FESpaces.get_matrix_builder(assem),
(Gridap.FESpaces.get_rows(assem), Gridap.FESpaces.get_cols(assem)),
) # Gridap.Algebra.CounterCS
Gridap.FESpaces.symbolic_loop_matrix!(m1, assem, matdata)
m2 = Gridap.FESpaces.nz_allocation(m1) # Gridap.Algebra.InserterCSC
#@show size(Gridap.FESpaces.create_from_nz(m2))
Gridap.FESpaces.numeric_loop_matrix!(m2, assem, matdata)
m3 = sparse(LowerTriangular(Gridap.FESpaces.create_from_nz(m2)))
_, _, v = findnz(m3)
vals[(nini + 1):(nini + length(v))] .= v
nini += length(v)

##############################################################
#=
nnzh = length(nlp.pdemeta.Hobjrows)
@show nlp.meta.nvar, nlp.pdemeta.nparam
ms = sparse(nlp.pdemeta.Hobjrows, nlp.pdemeta.Hobjcols, zeros(T, nnzh), nlp.meta.nvar - nlp.pdemeta.nparam, nlp.meta.nvar - nlp.pdemeta.nparam) # SparseArrays.sparse!
Gridap.FESpaces.numeric_loop_matrix!(ms, assem, matdata)
ms_lower = sparse(LowerTriangular(Gridap.FESpaces.create_from_nz(ms)))
vals[(nini + 1):(nini + nnzh)] .= ms_lower.nzval
nini += nnzh
=#

end

if nini < nlp.meta.nnzh
@@ -224,7 +270,8 @@ function hprod!(
end

function hess_structure(nlp::GridapPDENLPModel)
return (nlp.pdemeta.Hrows, nlp.pdemeta.Hcols)
np = nlp.pdemeta.nparam
return (vcat(nlp.pdemeta.Hobjkrows, nlp.pdemeta.Hobjrows .+ np, nlp.pdemeta.Hkrows, nlp.pdemeta.Hrows .+ np), vcat(nlp.pdemeta.Hobjkcols, nlp.pdemeta.Hobjcols .+ np, nlp.pdemeta.Hkcols, nlp.pdemeta.Hcols .+ np))
end

function hess_structure!(
@@ -233,8 +280,9 @@ function hess_structure!(
cols::AbstractVector{T},
) where {T <: Integer}
@lencheck nlp.meta.nnzh rows cols
rows .= T.(nlp.pdemeta.Hrows)
cols .= T.(nlp.pdemeta.Hcols)
np = nlp.pdemeta.nparam
rows .= T.(vcat(nlp.pdemeta.Hobjkrows, nlp.pdemeta.Hobjrows .+ np, nlp.pdemeta.Hkrows, nlp.pdemeta.Hrows .+ np))
cols .= T.(vcat(nlp.pdemeta.Hobjkcols, nlp.pdemeta.Hobjcols .+ np, nlp.pdemeta.Hkcols, nlp.pdemeta.Hcols .+ np))
return (rows, cols)
end
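
For reference, a toy illustration of the index bookkeeping that `hess_structure` and `hess_structure!` above perform: the k-related blocks keep their indices, while the FE blocks in (y, u) are shifted by `nparam`. The sizes and index vectors below are invented for illustration only.

```julia
using SparseArrays

nparam, nyu = 2, 3                          # 2 algebraic unknowns k, 3 FE dofs for (y, u)
Hobjkrows, Hobjkcols = [1, 2], [1, 1]       # objective block related to k
Hobjrows,  Hobjcols  = [1, 2, 2], [1, 1, 2] # objective block in (y, u), local indexing
Hkrows,    Hkcols    = [2], [2]             # constraint block related to k
Hrows,     Hcols     = [1, 3], [1, 2]       # constraint block in (y, u), local indexing

# k-related blocks keep their indices; (y, u) blocks are shifted by nparam
rows = vcat(Hobjkrows, Hobjrows .+ nparam, Hkrows, Hrows .+ nparam)
cols = vcat(Hobjkcols, Hobjcols .+ nparam, Hkcols, Hcols .+ nparam)

# Duplicate (row, col) pairs, where objective and constraint patterns overlap,
# are summed by `sparse`, giving the lower-triangular Lagrangian Hessian pattern.
H = sparse(rows, cols, ones(length(rows)), nparam + nyu, nparam + nyu)
```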

@@ -301,10 +349,38 @@ function hess_coord!(
end
luh = split_res(xh, λf)

lag_hess = Gridap.FESpaces._hessian(x -> split_res(x, λf), xh, luh)
matdata = Gridap.FESpaces.collect_cell_matrix(lag_hess)
# lag_hess = Gridap.FESpaces.jacobian(Gridap.FESpaces._gradient(x -> split_res(x, λf), xh, luh), xh) #
# lag_hess = Gridap.FESpaces._hessian(x -> split_res(x, λf), xh, luh)
lag_hess = _hessianv1(x -> split_res(x, λf), xh, luh)
matdata = Gridap.FESpaces.collect_cell_matrix(nlp.pdemeta.Y, nlp.pdemeta.X, lag_hess)
assem = SparseMatrixAssembler(nlp.pdemeta.Y, nlp.pdemeta.X)
fill_hess_coo_numeric!(vals, assem, matdata, n = nini)
##############################################################
# TO BE IMPROVED

m1 = Gridap.FESpaces.nz_counter(
Gridap.FESpaces.get_matrix_builder(assem),
(Gridap.FESpaces.get_rows(assem), Gridap.FESpaces.get_cols(assem)),
) # Gridap.Algebra.CounterCS
Gridap.FESpaces.symbolic_loop_matrix!(m1, assem, matdata)
m2 = Gridap.FESpaces.nz_allocation(m1) # Gridap.Algebra.InserterCSC
Gridap.FESpaces.numeric_loop_matrix!(m2, assem, matdata)
m3 = sparse(LowerTriangular(Gridap.FESpaces.create_from_nz(m2)))
_, _, v = findnz(m3)
vals[(nini + 1):(nini + length(v))] .= v
nini += length(v)

# nini = fill_hess_coo_numeric!(vals, assem, matdata, n = nini)
##############################################################
#=
# https://github.com/gridap/Gridap.jl/blob/master/src/FESpaces/SparseMatrixAssemblers.jl
nnzh = length(nlp.pdemeta.Hrows)
@show nlp.meta.nvar, nlp.pdemeta.nparam
ms = sparse(nlp.pdemeta.Hrows, nlp.pdemeta.Hcols, zeros(T, nnzh), nlp.meta.nvar - nlp.pdemeta.nparam, nlp.meta.nvar - nlp.pdemeta.nparam) # SparseArrays.sparse!
Gridap.FESpaces.numeric_loop_matrix!(ms, assem, matdata)
ms_lower = sparse(LowerTriangular(Gridap.FESpaces.create_from_nz(ms)))
vals[(nini + 1):(nini + nnzh)] .= ms_lower.nzval
nini += nnzh
=#
end

return vals
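
The two blocks marked `TO BE IMPROVED` above follow Gridap's low-level assembly sequence (`nz_counter`, `symbolic_loop_matrix!`, `nz_allocation`, `numeric_loop_matrix!`, `create_from_nz`). Below is a standalone sketch of that sequence on a toy Poisson stiffness matrix, assuming Gridap ≥ 0.17; the model, spaces, and bilinear form are illustrative and not part of this PR.

```julia
using Gridap
using LinearAlgebra, SparseArrays

# Toy problem, only used to produce some cell matrix data
model = CartesianDiscreteModel((0, 1, 0, 1), (4, 4))
reffe = ReferenceFE(lagrangian, Float64, 1)
V = TestFESpace(model, reffe, dirichlet_tags = "boundary")
U = TrialFESpace(V, x -> 0.0)
Ω = Triangulation(model)
dΩ = Measure(Ω, 2)
a(u, v) = ∫(∇(u) ⋅ ∇(v)) * dΩ

du = Gridap.FESpaces.get_trial_fe_basis(U)
dv = Gridap.FESpaces.get_fe_basis(V)
matdata = Gridap.FESpaces.collect_cell_matrix(U, V, a(du, dv))
assem = SparseMatrixAssembler(U, V)

# counter -> symbolic loop -> allocation -> numeric loop -> sparse matrix
m1 = Gridap.FESpaces.nz_counter(
  Gridap.FESpaces.get_matrix_builder(assem),
  (Gridap.FESpaces.get_rows(assem), Gridap.FESpaces.get_cols(assem)),
)
Gridap.FESpaces.symbolic_loop_matrix!(m1, assem, matdata)
m2 = Gridap.FESpaces.nz_allocation(m1)
Gridap.FESpaces.numeric_loop_matrix!(m2, assem, matdata)
A = Gridap.FESpaces.create_from_nz(m2)

# The Hessian routines above then keep only the lower triangle
H = sparse(LowerTriangular(A))
rows, cols, vals = findnz(H)
```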
@@ -380,7 +456,9 @@ function jtprod!(nlp::GridapPDENLPModel, x::AbstractVector, v::AbstractVector, J
end

function jac_structure(nlp::GridapPDENLPModel)
return (nlp.pdemeta.Jrows, nlp.pdemeta.Jcols)
nparam = nlp.pdemeta.nparam
ny = num_free_dofs(nlp.pdemeta.Ypde)
return (vcat(nlp.pdemeta.Jkrows, nlp.pdemeta.Jyrows, nlp.pdemeta.Jurows), vcat(nlp.pdemeta.Jkcols, nlp.pdemeta.Jycols .+ nparam, nlp.pdemeta.Jucols .+ nparam .+ ny))
end

function jac_structure!(
@@ -389,8 +467,10 @@ function jac_structure!(
cols::AbstractVector{T},
) where {T <: Integer}
@lencheck nlp.meta.nnzj rows cols
rows .= T.(nlp.pdemeta.Jrows)
cols .= T.(nlp.pdemeta.Jcols)
nparam = nlp.pdemeta.nparam
ny = num_free_dofs(nlp.pdemeta.Ypde)
rows .= T.(vcat(nlp.pdemeta.Jkrows, nlp.pdemeta.Jyrows, nlp.pdemeta.Jurows))
cols .= T.(vcat(nlp.pdemeta.Jkcols, nlp.pdemeta.Jycols .+ nparam, nlp.pdemeta.Jucols .+ nparam .+ ny))
return rows, cols
end
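
Similarly, a toy illustration of the `[k | y | u]` column layout produced by `jac_structure` and `jac_structure!` above: the y block is shifted by `nparam` and the u block by `nparam + ny`. Sizes and indices are invented for illustration only.

```julia
using SparseArrays

nparam, ny, nu, ncon = 2, 3, 2, 3
Jkrows, Jkcols = [1, 2, 3], [1, 1, 2]   # block w.r.t. k
Jyrows, Jycols = [1, 2, 3], [1, 2, 3]   # block w.r.t. y, local column indexing
Jurows, Jucols = [1, 3], [1, 2]         # block w.r.t. u, local column indexing

# Columns are laid out as [k | y | u]
rows = vcat(Jkrows, Jyrows, Jurows)
cols = vcat(Jkcols, Jycols .+ nparam, Jucols .+ nparam .+ ny)
J = sparse(rows, cols, ones(length(rows)), ncon, nparam + ny + nu)
```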

@@ -407,6 +487,10 @@ function jac_coord!(nlp::GridapPDENLPModel, x::AbstractVector, vals::AbstractVec
nlp.pdemeta.Xpde,
nlp.pdemeta.Ycon,
x,
nlp.pdemeta.Jyrows,
nlp.pdemeta.Jycols,
nlp.pdemeta.Jurows,
nlp.pdemeta.Jucols,
vals,
nlp.workspace.c,
nlp.workspace.Jk,
src/PDENLPModels.jl (4 changes: 3 additions & 1 deletion)

@@ -1,7 +1,6 @@
module PDENLPModels

#This package contains a list of NLPModels for PDE optimization.
using LinearAlgebra: length
using ForwardDiff, LinearAlgebra, SparseArrays, FastClosures

#JSO packages
@@ -39,6 +38,9 @@ function Gridap.FESpaces.scatter_free_and_dirichlet_values(
lazy_map(Broadcasting(Gridap.Arrays.PosNegReindex(free_values, dirichlet_values)), cell_dof_ids)
end

include("gridap_autodiff.jl")
include("gridap_utils.jl")

#Regroup the different types of FEFunction
const FEFunctionType =
Union{Gridap.FESpaces.SingleFieldFEFunction, Gridap.MultiField.MultiFieldFEFunction}
src/additional_constructors.jl (63 changes: 35 additions & 28 deletions)

@@ -20,7 +20,8 @@ function GridapPDENLPModel(

@assert nparam ≥ 0 throw(DimensionError("x0", nvar_pde, nvar))

rows, cols, nnzh = _compute_hess_structure(tnrj, Y, X, x0, nparam)
robj, rck, cobj, cck, no, nk = _compute_hess_structure(tnrj, Y, X, x0, nparam)
nnzh = nk + no

if NRJ <: NoFETerm && typeof(lvar) <: AbstractVector && typeof(uvar) <: AbstractVector
lv, uv = lvar, uvar
@@ -54,8 +55,18 @@ function GridapPDENLPModel(
nvar_con,
nparam,
nnzh,
rows,
cols,
robj,
cobj,
rck,
cck,
Int[],
Int[],
Int[],
Int[],
Int[],
Int[],
Int[],
Int[],
Int[],
Int[],
)
@@ -169,23 +180,7 @@ function GridapPDENLPModel(

@assert nparam >= 0 throw(DimensionError("x0", nvar_pde + nvar_con, nvar))

if !(typeof(Xcon) <: VoidFESpace) && !(typeof(Ycon) <: VoidFESpace)
_xpde = _fespace_to_multifieldfespace(Xpde)
_xcon = _fespace_to_multifieldfespace(Xcon)
#Handle the case where Ypde or Ycon are single field FE space(s).
_ypde = _fespace_to_multifieldfespace(Ypde)
_ycon = _fespace_to_multifieldfespace(Ycon)
#Build Y (resp. X) the trial (resp. test) space of the Multi Field function [y,u]
X = MultiFieldFESpace(vcat(_xpde.spaces, _xcon.spaces))
Y = MultiFieldFESpace(vcat(_ypde.spaces, _ycon.spaces))
elseif (typeof(Xcon) <: VoidFESpace) ⊻ (typeof(Ycon) <: VoidFESpace)
throw(ErrorException("Error: Xcon or Ycon are both nothing or must be specified."))
else
#_xpde = _fespace_to_multifieldfespace(Xpde)
X = Xpde #_xpde
#_ypde = _fespace_to_multifieldfespace(Ypde)
Y = Ypde #_ypde
end
Y, X = _to_multifieldfespace(Ypde, Xpde, Ycon, Xcon)

if NRJ == NoFETerm && typeof(lvary) <: AbstractVector && typeof(uvary) <: AbstractVector
lvar, uvar = vcat(lvary, lvaru, lvark), vcat(uvary, uvaru, uvark)
Expand All @@ -203,15 +198,17 @@ function GridapPDENLPModel(
@lencheck nvar lvar uvar
@lencheck ncon ucon y0

rows, cols, nnzh = _compute_hess_structure(tnrj, c, Y, Ypde, Ycon, X, Xpde, x0, nparam)
_, _, nnzh_obj = _compute_hess_structure(tnrj, Y, X, x0, nparam)
rk, ro, rck, rc, ck, co, cck, cc, nk, no, nck, nc = _compute_hess_structure(tnrj, c, Y, Ypde, Ycon, X, Xpde, x0, nparam)
nnzh_obj = nk + no
nnzh = nnzh_obj + nck + nc
#_, _, nnzh_obj = _compute_hess_structure(tnrj, Y, X, x0, nparam)

if typeof(c) <: AffineFEOperator #Here we expect ncon = nvar_pde
lin = 1:ncon
end
Jkrows, Jkcols, nnz_jac_k = jac_k_structure(nparam, ncon)
Jrows, Jcols, nini = _jacobian_struct(c, x0, Y, Xpde, Ypde, Ycon)
nnzj = nini + nnz_jac_k
Jyrows, Jurows, Jycols, Jucols, nini_y, nini_u = _jacobian_struct(c, x0, Y, Xpde, Ypde, Ycon)
nnzj = nini_y + nini_u + nnz_jac_k

meta = NLPModelMeta{T, S}(
nvar,
@@ -243,10 +240,20 @@ function GridapPDENLPModel(
nvar_con,
nparam,
nnzh_obj,
rows,
cols,
vcat(Jkrows, Jrows),
vcat(Jkcols, Jcols .+ nparam),
rk,
ck,
ro,
co,
rck,
cck,
rc,
cc,
Jkrows,
Jkcols,
Jyrows,
Jycols,
Jurows,
Jucols,
)

workspace = PDEWorkspace(T, S, nvar, ncon, nparam, nnzh, nnzj)
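
The constructor above now delegates the assembly of the multi-field spaces to `_to_multifieldfespace` (added in commit aef29fa); its definition is not shown in this excerpt. Below is a sketch of what such a helper might look like, reconstructed from the inline logic it replaces in this file; the actual implementation in the PR may differ.

```julia
# Hypothetical reconstruction of _to_multifieldfespace based on the removed inline code.
function _to_multifieldfespace(Ypde, Xpde, Ycon, Xcon)
  if !(typeof(Xcon) <: VoidFESpace) && !(typeof(Ycon) <: VoidFESpace)
    # Handle the case where Ypde or Ycon are single-field FE spaces
    _xpde = _fespace_to_multifieldfespace(Xpde)
    _xcon = _fespace_to_multifieldfespace(Xcon)
    _ypde = _fespace_to_multifieldfespace(Ypde)
    _ycon = _fespace_to_multifieldfespace(Ycon)
    # Build Y (resp. X), the trial (resp. test) space of the multi-field function [y, u]
    Y = MultiFieldFESpace(vcat(_ypde.spaces, _ycon.spaces))
    X = MultiFieldFESpace(vcat(_xpde.spaces, _xcon.spaces))
  elseif (typeof(Xcon) <: VoidFESpace) ⊻ (typeof(Ycon) <: VoidFESpace)
    throw(ErrorException("Error: Xcon or Ycon are both nothing or must be specified."))
  else
    Y, X = Ypde, Xpde
  end
  return Y, X
end
```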
src/additional_obj_terms.jl (45 changes: 32 additions & 13 deletions)

@@ -1,3 +1,11 @@
trial_check(::TrialFESpace) = true
trial_check(::Gridap.FESpaces.DirichletFESpace) = true
trial_check(::Gridap.FESpaces.FESpaceWithConstantFixed) = true
trial_check(::Gridap.FESpaces.ZeroMeanFESpace) = true
trial_check(::Gridap.FESpaces.UnconstrainedFESpace) = true # When Ycon = TrialFESpace(Xcon), we have typeof(Xcon) == typeof(Ycon)
trial_check(::VoidFESpace) = true
trial_check(Y::MultiFieldFESpace) = all(trial_check, Y)

abstract type AbstractEnergyTerm end

"""
@@ -128,6 +136,12 @@ struct EnergyFETerm <: AbstractEnergyTerm
trian::Triangulation
Ypde::FESpace
Ycon::FESpace

function EnergyFETerm(f, trian, Ypde, Ycon)
@assert trial_check(Ypde)
@assert trial_check(Ycon)
return new(f, trian, Ypde, Ycon)
end
end

function EnergyFETerm(f::Function, trian::Triangulation, Ypde)
@@ -153,14 +167,16 @@ function _compute_gradient!(
X::FESpace,
)
@lencheck 0 κ
cell_yu = Gridap.FESpaces.get_cell_dof_values(yu)
cell_id_yu = Gridap.Arrays.IdentityVector(length(cell_yu))
cell_r_yu = get_array(gradient(x -> _obj_integral(tnrj, κ, x), yu))
#Put the result in the format expected by Gridap.FESpaces.assemble_vector!
vecdata_yu = [[cell_r_yu], [cell_id_yu]] #TODO would replace by Tuple work?
fsplit(x) = if typeof(tnrj.Ycon) <: VoidFESpace
return tnrj.f(x)
else
y, u = _split_FEFunction(x, tnrj.Ypde, tnrj.Ycon)
return tnrj.f(y, u)
end
vec = Gridap.FESpaces.collect_cell_vector(X, gradient(fsplit, yu))
#Assemble the gradient in the "good" space
assem = Gridap.FESpaces.SparseMatrixAssembler(Y, X)
Gridap.FESpaces.assemble_vector!(g, assem, vecdata_yu)
Gridap.FESpaces.assemble_vector!(g, assem, vec)
return g
end

@@ -216,6 +232,8 @@ struct MixedEnergyFETerm <: AbstractEnergyTerm
Ycon::FESpace,
)
@assert n > 0
@assert trial_check(Ypde)
@assert trial_check(Ycon)
return new(f, trian, n, inde, Ypde, Ycon)
end
end
@@ -255,18 +273,19 @@ function _compute_gradient!(
nyu = num_free_dofs(Y)
@lencheck tnrj.nparam + nyu g

cell_yu = Gridap.FESpaces.get_cell_dof_values(yu)
cell_id_yu = Gridap.Arrays.IdentityVector(length(cell_yu))

cell_r_yu = get_array(gradient(x -> _obj_integral(tnrj, κ, x), yu))
#Put the result in the format expected by Gridap.FESpaces.assemble_vector
vecdata_yu = [[cell_r_yu], [cell_id_yu]] #TODO would replace by Tuple work?
fsplit(x) = if typeof(tnrj.Ycon) <: VoidFESpace
return tnrj.f(κ, x)
else
y, u = _split_FEFunction(x, tnrj.Ypde, tnrj.Ycon)
return tnrj.f(κ, y, u)
end
vec = Gridap.FESpaces.collect_cell_vector(X, gradient(fsplit, yu))
#Assemble the gradient in the "good" space
assem = Gridap.FESpaces.SparseMatrixAssembler(Y, X)
Gridap.FESpaces.assemble_vector!(
view(g, (tnrj.nparam + 1):(tnrj.nparam + nyu)),
assem,
vecdata_yu,
vec,
)

_compute_gradient_k!(view(g, 1:(tnrj.nparam)), tnrj, κ, yu)