diff --git a/Project.toml b/Project.toml
index 4d338dc95..ac9ceb166 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,6 +1,6 @@
 name = "JuliaBUGS"
 uuid = "ba9fb4c0-828e-4473-b6a1-cd2560fee5bf"
-version = "0.7.5"
+version = "0.7.6"
 
 [deps]
 AbstractMCMC = "80f14c24-f653-4e6a-9b94-39d6b0f70001"
diff --git a/src/model.jl b/src/model.jl
index a31c28c96..a37bf34f0 100644
--- a/src/model.jl
+++ b/src/model.jl
@@ -440,10 +440,10 @@ function AbstractPPL.evaluate!!(rng::Random.AbstractRNG, model::BUGSModel; sampl
         loop_vars = model.flattened_graph_node_data.loop_vars_vals[i]
         if_sample = sample_all || !is_observed # also sample if not observed, only sample conditioned variables if sample_all is true
         if !is_stochastic
-            value = node_function(model.evaluation_env, loop_vars)
+            value = node_function(evaluation_env, loop_vars)
             evaluation_env = setindex!!(evaluation_env, value, vn)
         else
-            dist = node_function(model.evaluation_env, loop_vars)
+            dist = node_function(evaluation_env, loop_vars)
             if if_sample
                 value = rand(rng, dist) # just sample from the prior
             else
@@ -473,10 +473,10 @@ function AbstractPPL.evaluate!!(model::BUGSModel)
         node_function = model.flattened_graph_node_data.node_function_vals[i]
         loop_vars = model.flattened_graph_node_data.loop_vars_vals[i]
         if !is_stochastic
-            value = node_function(model.evaluation_env, loop_vars)
+            value = node_function(evaluation_env, loop_vars)
             evaluation_env = setindex!!(evaluation_env, value, vn)
         else
-            dist = node_function(model.evaluation_env, loop_vars)
+            dist = node_function(evaluation_env, loop_vars)
             value = AbstractPPL.get(evaluation_env, vn)
             if model.transformed
                 # although the values stored in `evaluation_env` are in their original space,
diff --git a/test/model.jl b/test/model.jl
index 6428a9346..9e44f01af 100644
--- a/test/model.jl
+++ b/test/model.jl
@@ -16,6 +16,46 @@
     @test eval_env.x != 1.0
 end
 
+@testset "`evaluate!!` is actually modifying returned `evaluation_env`" begin
+    # unidentifiable coin-toss model
+    unid_model_def = @bugs begin
+        for i in 1:2
+            p[i] ~ dunif(0, 1)
+        end
+        p_prod = p[1] * p[2]
+        n_heads ~ dbin(p_prod, n_flips)
+    end
+    data = (; n_flips=100000)
+    n_sim = 1000
+    true_prop = 0.25 # = E[p_prod] = 0.5^2
+    rng = MersenneTwister(123)
+
+    # do multiple initializations to check for bug
+    for _ in 1:10
+        model = compile(unid_model_def, data)
+        original_env = deepcopy(model.evaluation_env)
+
+        # simulate flips and compute rate of heads
+        heads_rate = mean(
+            first(JuliaBUGS.evaluate!!(rng, model)).n_heads / data.n_flips for _ in 1:n_sim
+        )
+
+        # compute pvalue for a one-sample test against true proportion
+        z_true = (heads_rate - true_prop) / sqrt(true_prop * (1 - true_prop) / n_sim)
+        pval_true = 2 * ccdf(Normal(), abs(z_true))
+
+        # compute pvalue for a one-sample test against initial p_prod
+        z_alt =
+            (heads_rate - original_env.p_prod) /
+            sqrt(original_env.p_prod * (1 - original_env.p_prod) / n_sim)
+        pval_alt = 2 * ccdf(Normal(), abs(z_alt))
+
+        # check that simulated data is more consistent with true proportion than initial value
+        @test pval_true > 0.05 # simulated data consistent with true proportion
+        @test pval_alt < 0.05 # simulated data inconsistent with initial value
+    end
+end
+
 @testset "logprior and loglikelihood" begin
     @testset "Complex model with transformations" begin
         model_def = @bugs begin