overlapping points losses added
AstitvaAggarwal committed Nov 25, 2023
1 parent 98e0a94 commit d48b1c8
Showing 4 changed files with 89 additions and 66 deletions.
57 changes: 13 additions & 44 deletions src/PDE_BPINN.jl
@@ -12,14 +12,13 @@ mutable struct PDELogTargetDensity{
priors::P
allstd::Vector{Vector{Float64}}
names::Tuple
- physdt::Vector{Float64}
extraparams::Int
init_params::I
full_loglikelihood::F
Phi::PH

function PDELogTargetDensity(dim, strategy, dataset,
- priors, allstd, names, physdt, extraparams,
+ priors, allstd, names, extraparams,
init_params::AbstractVector, full_loglikelihood, Phi)
new{
typeof(strategy),
@@ -34,14 +33,13 @@
priors,
allstd,
names,
- physdt,
extraparams,
init_params,
full_loglikelihood,
Phi)
end
function PDELogTargetDensity(dim, strategy, dataset,
- priors, allstd, names, physdt, extraparams,
+ priors, allstd, names, extraparams,
init_params::Union{NamedTuple, ComponentArrays.ComponentVector},
full_loglikelihood, Phi)
new{
@@ -57,7 +55,6 @@
priors,
allstd,
names,
- physdt,
extraparams,
init_params,
full_loglikelihood,
@@ -113,7 +110,7 @@ function setparameters(Tar::PDELogTargetDensity, θ)
depvar = a)
end
else
- # multioutput Lux case
+ # multioutput Flux case
return vector_to_parameters(Luxparams, ps)
end
end
@@ -231,44 +228,9 @@ function adaptorchoice(Adaptor, mma, ssa)
end
end

- # function inference(samples, discretization, saveat, numensemble, ℓπ)
- # ranges = []
- # for i in eachindex(domains)
- # push!(ranges, [infimum(domains[i].domain), supremum(infimum(domains[i].domain))])
- # end
- # ranges = map(ranges) do x
- # collect(x[1]:saveat:x[2])
- # end
- # samples = samples[(end - numensemble):end]
- # chain = discretization.chain
-
- # if discretization.multioutput && chain[1] isa Lux.AbstractExplicitLayer
- # temp = [setparameters(ℓπ, samples[i]) for i in eachindex(samples)]
-
- # luxar = map(temp) do x
- # chain(t', x, st[i])
- # end
-
- # elseif discretization.multioutput && chain[1] isa Flux.chain
-
- # elseif chain isa Flux.Chain
- # re = Flux.destructure(chain)[2]
- # out1 = re.([sample for sample in samples])
- # luxar = [collect(out1[i](t') for t in ranges)]
- # fluxmean = map(luxar) do x
- # mean(vcat(x...)[:, i]) for i in eachindex(x)
- # end
- # else
- # transsamples = [vector_to_parameters(sample, initl) for sample in samples]
- # luxar2 = [chainl(t1', transsamples[i], st)[1] for i in 800:1000]
- # luxmean = [mean(vcat(luxar2...)[:, i]) for i in eachindex(t1)]
- # end
- # end
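For context, the helper deleted above sketched posterior-ensemble inference: keep the last numensemble draws, evaluate the network on a grid for each draw, and average pointwise. A runnable condensation of that idea follows; every name and the stand-in network are illustrative assumptions, not NeuralPDE API:

using Statistics

ranges = [collect(0.0:0.1:1.0)]            # one grid per domain, step = saveat
samples = [randn(2) for _ in 1:1000]       # placeholder posterior draws of flat params
numensemble = 200
kept = samples[(end - numensemble):end]    # keep the last numensemble + 1 draws

predict(t, θ) = θ[1] .* t .+ θ[2]          # stand-in for the reconstructed network re(θ)

# pointwise ensemble mean over the kept draws
outputs = [predict(ranges[1], θ) for θ in kept]
ensemble_mean = [mean(getindex.(outputs, i)) for i in eachindex(ranges[1])]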

# priors: pdf for W,b + pdf for ODE params
function ahmc_bayesian_pinn_pde(pde_system, discretization;
- strategy = GridTraining, dataset = [nothing],
- draw_samples = 1000, physdt = [1 / 20.0],
+ dataset = [nothing], draw_samples = 1000,
bcstd = [0.01], l2std = [0.05],
phystd = [0.05], priorsNNw = (0.0, 2.0),
param = [], nchains = 1, Kernel = HMC,
@@ -284,6 +246,14 @@ function ahmc_bayesian_pinn_pde(pde_system, discretization;
bayesian = true,
dataset_given = dataset)

+ if discretization.param_estim && isempty(param)
+     throw(UndefVarError(:param))
+ elseif discretization.param_estim && dataset isa Vector{Nothing}
+     throw(UndefVarError(:dataset))
+ elseif discretization.param_estim && length(l2std) != length(pinnrep.depvars)
+     throw(error("L2 stds length must match number of dependent variables"))
+ end

# for physics loglikelihood
full_weighted_loglikelihood = pinnrep.loss_functions.full_loss_function
chain = discretization.chain
@@ -334,7 +304,7 @@ function ahmc_bayesian_pinn_pde(pde_system, discretization;
nparameters += ninv
end

- # physdt vector in case of N-dimensional domains
+ # vector in case of N-dimensional domains
strategy = discretization.strategy

# dimensions would be total no of params,initial_nnθ for Lux namedTuples
@@ -344,7 +314,6 @@
priors,
[phystd, bcstd, l2std],
names,
- physdt,
ninv,
initial_nnθ,
full_weighted_loglikelihood,
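Taken together, the changes to this file shrink the sampler's keyword surface: strategy and physdt are gone (the strategy is now read off the discretization), and param_estim = true must come with both param priors and a dataset. A minimal sketch of the resulting call; the toy equation, network width, and standard deviations are assumptions, not part of this commit:

using NeuralPDE, Lux, ModelingToolkit, Distributions
import ModelingToolkit: Interval

@parameters t α
@variables u(..)
Dt = Differential(t)
eq = Dt(u(t)) ~ α * u(t)                   # hypothetical toy system
bcs = [u(0) ~ 1.0]
domains = [t ∈ Interval(0.0, 1.0)]
@named pde_system = PDESystem(eq, bcs, domains, [t], [u(t)], [α],
    defaults = Dict(α => 1.0))

chain = Lux.Chain(Lux.Dense(1, 8, Lux.σ), Lux.Dense(8, 1))
discretization = NeuralPDE.PhysicsInformedNN([chain], GridTraining([0.05]),
    param_estim = true)

# column 1: observed values, columns 2:end: coordinates (synthetic placeholder)
dataset = [hcat(rand(20), collect(range(0.0, 1.0, length = 20)))]

mcmc_chain, samples, stats = ahmc_bayesian_pinn_pde(pde_system, discretization;
    draw_samples = 1000, bcstd = [0.01], phystd = [0.05], l2std = [0.05],
    priorsNNw = (0.0, 2.0), param = [Normal(1.0, 0.5)], dataset = dataset)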
10 changes: 6 additions & 4 deletions src/discretize.jl
@@ -587,15 +587,17 @@ function SciMLBase.symbolic_discretize(pde_system::PDESystem,

if bayesian
# required as Physics loss also needed on dataset domain points
- pde_loss_functions1, bc_loss_functions1 = if !(dataset_given[1] isa Nothing)
+ pde_loss_functions1, bc_loss_functions1 = if !(dataset_given isa Vector{Nothing})
if !(strategy isa GridTraining)
- println("only GridTraining strategy allowed")
+ throw("only GridTraining strategy allowed")
else
merge_strategy_with_loglikelihood_function(pinnrep,
strategy,
datafree_pde_loss_functions,
datafree_bc_loss_functions, train_sets_L2loss2 = dataset_given)
end
+ else
+ ([],[])
end

function full_likelihood_function(θ, allstd)
@@ -607,7 +609,7 @@ function SciMLBase.symbolic_discretize(pde_system::PDESystem,
bc_loglikelihoods = [logpdf(Normal(0, stdbcs[j]), bc_loss_function(θ))
for (j, bc_loss_function) in enumerate(bc_loss_functions)]

- if !(dataset_given[1] isa Nothing)
+ if !(dataset_given isa Vector{Nothing})
pde_loglikelihoods += [logpdf(Normal(0, stdpdes[j]), pde_loss_function1(θ))
for (j, pde_loss_function1) in enumerate(pde_loss_functions1)]

@@ -653,7 +655,7 @@ function SciMLBase.symbolic_discretize(pde_system::PDESystem,
return additional_loss(phi, θ_, p_)
end

- _additional_loglikelihood = logpdf(Normal(0, stdextra) _additional_loss(phi, θ))
+ _additional_loglikelihood = logpdf(Normal(0, stdextra), _additional_loss(phi, θ))

weighted_additional_loglikelihood = adaloss.additional_loss_weights[1] *
_additional_loglikelihood
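The structure of full_likelihood_function above: each PDE and BC residual is scored under a zero-mean Normal with the user-supplied standard deviation, and, when a dataset is given, the same PDE operators are scored again at the dataset's own domain points (the overlapping points of the commit title). A standalone sketch with toy losses; nothing below is NeuralPDE API:

using Distributions

pde_losses = [θ -> sum(abs2, θ) - 1.0]     # hypothetical datafree PDE residual norms
bc_losses = [θ -> θ[1] - 0.5]              # hypothetical BC residual
stdpdes, stdbcs = [0.05], [0.01]

function full_loglikelihood(θ; dataset_losses = nothing)
    ll = sum(logpdf(Normal(0, stdpdes[j]), f(θ)) for (j, f) in enumerate(pde_losses))
    ll += sum(logpdf(Normal(0, stdbcs[j]), f(θ)) for (j, f) in enumerate(bc_losses))
    if dataset_losses !== nothing          # extra physics terms on dataset points
        ll += sum(logpdf(Normal(0, stdpdes[j]), f(θ)) for (j, f) in enumerate(dataset_losses))
    end
    return ll
end

full_loglikelihood([0.6, 0.2])                              # physics + BC only
full_loglikelihood([0.6, 0.2]; dataset_losses = pde_losses) # plus dataset-domain terms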
3 changes: 3 additions & 0 deletions src/training_strategies.jl
@@ -29,12 +29,15 @@ function merge_strategy_with_loglikelihood_function(pinnrep::PINNRepresentation,
dict_indvars, dict_depvars)

bcs_train_sets = train_sets[2]
+ # vec later: each _set in pde_train_sets has columns as points -> vector of points (pde_train_sets must be rowwise)
pde_train_sets = [train_set[:, 2:end] for train_set in train_sets_L2loss2]
+ # the points in the domain and on the boundary

pde_train_sets = adapt.(parameterless_type(ComponentArrays.getdata(flat_init_params)),
pde_train_sets)
bcs_train_sets = adapt.(parameterless_type(ComponentArrays.getdata(flat_init_params)),
bcs_train_sets)

pde_loss_functions = [get_loss_function(_loss, _set, eltypeθ, strategy)
for (_loss, _set) in zip(datafree_pde_loss_function,
pde_train_sets)]
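The new slice train_set[:, 2:end] assumes a fixed dataset layout: one matrix per dependent variable, with measured values in column 1 and the coordinates of the measurement points in the remaining columns. A short sketch of that layout on synthetic data; the layout itself is inferred from this diff, not documented API:

n = 50
t = collect(range(0.0, 1.0, length = n))
û = sin.(2π .* t) .+ 0.05 .* randn(n)      # noisy observations (placeholder)
dataset = [hcat(û, t)]                      # column 1: values, column 2: coordinate

# what this commit hands to the PDE losses: the coordinate columns only
pde_train_sets = [train_set[:, 2:end] for train_set in dataset]
size(pde_train_sets[1])                     # (50, 1)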
85 changes: 67 additions & 18 deletions test/BPINN_PDE_tests.jl
@@ -126,7 +126,7 @@
# x̂ = collect(Float64, Array(u) + 0.2 .* Array(u) .* randn(size(u)))
# time = vec(collect(Float64, ta))
# # x = time .* 2.0
- # dataset = [hcat(x̂, time)]
+ # dataset = [hcat(x̂, time), hcat(x̂, time), hcat(x̂, time, time), hcat(x̂, time, time)]
# hcat(datase[:, 2:end] for datase in dataset)

# discretization = NeuralPDE.PhysicsInformedNN([chainf],
@@ -137,10 +137,10 @@
# mcmc_chain, samples, stats = ahmc_bayesian_pinn_pde(pde_system,
# discretization;
# draw_samples = 1500, physdt = [1 / 20.0],
- # bcstd = [0.5],
- # phystd = [0.01], l2std = [0.02],
+ # bcstd = [0.05],
+ # phystd = [0.03], l2std = [0.02],
# param = [Normal(9, 2)],
- # priorsNNw = (0.0, 1.0),
+ # priorsNNw = (0.0, 10.0),
# dataset = dataset,
# progress = true)

@@ -169,6 +169,9 @@
# plot!(t1, fluxmean)
# plot!(dataset[1][:, 2], dataset[1][:, 1])

+ # samples[1500]
+ # samples[1500]

# transsamples = [vector_to_parameters(sample, initl) for sample[1:(end - 1)] in samples]
# luxar2 = [chainl(t1', transsamples[i], st)[1] for i in 1300:1500]
# luxmean = [mean(vcat(luxar2...)[:, i]) for i in eachindex(t1)]
@@ -188,13 +191,13 @@
# using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL
# import ModelingToolkit: Interval

- # @parameters x y
+ # @parameters x y z
# @variables p(..) q(..) r(..) s(..)
# Dx = Differential(x)
# Dy = Differential(y)

# # 2D PDE
- # eq = p(x) + q(y) + Dx(r(x, y)) + Dy(s(y, x)) ~ 0
+ # eq = p(x) + z * q(y) + Dx(r(x, y)) + Dy(s(y, x)) ~ 0

# # Initial and boundary conditions
# bcs = [p(1) ~ 0.0f0, q(-1) ~ 0.0f0,
@@ -206,35 +209,81 @@
# y ∈ Interval(0.0, 1.0)]

# numhid = 3
- # chains = [[Lux.Chain(Lux.Dense(1, numhid, Lux.σ), Lux.Dense(numhid, numhid, Lux.σ),
- # Lux.Dense(numhid, 1)) for i in 1:2]
- # [Lux.Chain(Lux.Dense(2, numhid, Lux.σ), Lux.Dense(numhid, numhid, Lux.σ),
- # Lux.Dense(numhid, 1)) for i in 1:2]]
- # discretization = NeuralPDE.PhysicsInformedNN(chains, GridTraining([0.01, 0.01]))
+ # chains = [
+ # Flux.Chain(Flux.Dense(1, numhid, σ), Flux.Dense(numhid, numhid, σ),
+ # Flux.Dense(numhid, 1)),
+ # Flux.Chain(Flux.Dense(1, numhid, σ), Flux.Dense(numhid, numhid, σ),
+ # Flux.Dense(numhid, 1)),
+ # Flux.Chain(Flux.Dense(2, numhid, σ), Flux.Dense(numhid, numhid, σ),
+ # Flux.Dense(numhid, 1)),
+ # Flux.Chain(Flux.Dense(2, numhid, σ), Flux.Dense(numhid, numhid, σ),
+ # Flux.Dense(numhid, 1))]

+ # discretization = NeuralPDE.PhysicsInformedNN(chains,
+ # GridTraining([0.1, 0.1]),
+ # param_estim = true)
+ # discretization.strategy
+ # @named pde_system = PDESystem(eq,
+ # bcs,
+ # domains,
+ # [x, y],
+ # [p(x), q(y), r(x, y), s(y, x)],
+ # [z],
+ # defaults = Dict(z => 3))
+ # dataset = [hcat(x̂, time), hcat(x̂, time), hcat(x̂, time, time), hcat(x̂, time, time)]

- # @named pde_system = PDESystem(eq, bcs, domains, [x, y], [p(x), q(y), r(x, y), s(y, x)])
# prob = SciMLBase.discretize(pde_system, discretization)
+ # a = [train_set[:, 2:end]' for train_set in dataset]
+ # b = zip(a)
+ # c = [yuh for yuh in b]
+ # c[[2], [1:50]]
+ # c[2]
+ # c[[2], [1:50]]
+ # zip(a)

# mcmc_chain, samples, stats = ahmc_bayesian_pinn_pde(pde_system,
# discretization;
# draw_samples = 1500,
# bcstd = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
- # phystd = [0.1],
+ # phystd = [0.1], l2std = [0.1, 0.1, 0.1, 0.1],
# priorsNNw = (0.0, 10.0),
+ # param = [Normal(3, 2)],
+ # dataset = dataset,
# progress = true)
+ # # Example dataset structure
+ # matrix_dep_var_1 = dataset[1]
+
+ # matrix_dep_var_2 = dataset[2]
+
+ # dataset = [matrix_dep_var_1, matrix_dep_var_2]
+
+ # # Extract independent variable values
+ # indep_var_values = [matrix[:, 2:end] for matrix in dataset]
+
+ # # Adapt the existing code
+ # eltypeθ = Float64 # Replace with your desired element type
+ # pde_args = [[:indep_var]]
+
+ # # Generate training sets for each variable
+ # # Generate training sets for each variable
+ # pde_train_sets = map(pde_args) do bt
+ # span = map(b -> vcat([indep_vars[:, b] for indep_vars in indep_var_values]...), bt)
+ # _set = adapt(eltypeθ, hcat(span...))
+ # end
+
+ # pinnrep.depvars
+ # firstelement(domains[1])
+ # infimum(domains[1])
+ # infimum(domains[1].domain)
+ # domains = [x ∈ Interval(0.0, 1.0)]
+ # size(domains)
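The dataset-to-training-set scratch above (indep_var_values, pde_args, span) condenses into a runnable form as follows. The column selectors and matrix sizes are assumptions, and adapt(Array{eltypeθ}, ...) replaces the scratch's adapt(eltypeθ, ...), which would not work as written:

using Adapt

eltypeθ = Float64
dataset = [rand(10, 3), rand(10, 3)]        # two depvars: [values | x | y]
indep_var_values = [m[:, 2:end] for m in dataset]

pde_args = [[1, 2]]                         # hypothetical coordinate-column selectors
pde_train_sets = map(pde_args) do bt
    # stack each selected coordinate column across all dataset matrices
    span = map(b -> vcat([iv[:, b] for iv in indep_var_values]...), bt)
    adapt(Array{eltypeθ}, hcat(span...))
end
size(pde_train_sets[1])                     # (20, 2)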

- # # callback = function (p, l)
- # # println("Current loss is: $l")
- # # return false
- # # end
+ # callback = function (p, l)
+ # println("Current loss is: $l")
+ # return false
+ # end

- # # res = Optimization.solve(prob, BFGS(); callback = callback, maxiters = 100)
+ # res = Optimization.solve(prob, BFGS(); callback = callback, maxiters = 100)

# # Paper experiments
# # function sir_ode!(u, p, t)
