Skip to content

Commit c064825

Browse files
Auto-format Julia code with JuliaFormatter
1 parent cbaa04b commit c064825

File tree

5 files changed

+35
-28
lines changed

5 files changed

+35
-28
lines changed

src/mcmc/external_sampler.jl

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ struct ExternalSampler{S<:AbstractSampler,AD<:ADTypes.AbstractADType,Unconstrain
2828
function ExternalSampler(
2929
sampler::AbstractSampler,
3030
adtype::ADTypes.AbstractADType,
31-
::Val{unconstrained}=Val(true),
31+
(::Val{unconstrained})=Val(true),
3232
) where {unconstrained}
3333
if !(unconstrained isa Bool)
3434
throw(
@@ -44,9 +44,11 @@ end
4444
4545
Return `true` if the sampler requires unconstrained space, and `false` otherwise.
4646
"""
47-
requires_unconstrained_space(
47+
function requires_unconstrained_space(
4848
::ExternalSampler{<:Any,<:Any,Unconstrained}
49-
) where {Unconstrained} = Unconstrained
49+
) where {Unconstrained}
50+
Unconstrained
51+
end
5052

5153
"""
5254
externalsampler(sampler::AbstractSampler; adtype=AutoForwardDiff(), unconstrained=true)

test/mcmc/Inference.jl

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -297,7 +297,7 @@ using Turing
297297
chain = sample(StableRNG(seed), gauss(x), PG(10), 10)
298298
chain = sample(StableRNG(seed), gauss(x), SMC(), 10)
299299

300-
@model function gauss2(::Type{TV}=Vector{Float64}; x) where {TV}
300+
@model function gauss2((::Type{TV})=Vector{Float64}; x) where {TV}
301301
priors = TV(undef, 2)
302302
priors[1] ~ InverseGamma(2, 3) # s
303303
priors[2] ~ Normal(0, sqrt(priors[1])) # m
@@ -321,7 +321,7 @@ using Turing
321321
StableRNG(seed), gauss2(DynamicPPL.TypeWrap{Vector{Float64}}(); x=x), SMC(), 10
322322
)
323323

324-
@model function gauss3(x, ::Type{TV}=Vector{Float64}) where {TV}
324+
@model function gauss3(x, (::Type{TV})=Vector{Float64}) where {TV}
325325
priors = TV(undef, 2)
326326
priors[1] ~ InverseGamma(2, 3) # s
327327
priors[2] ~ Normal(0, sqrt(priors[1])) # m
@@ -548,7 +548,7 @@ using Turing
548548
N = 10
549549
alg = HMC(0.01, 5)
550550
x = randn(1000)
551-
@model function vdemo1(::Type{T}=Float64) where {T}
551+
@model function vdemo1((::Type{T})=Float64) where {T}
552552
x = Vector{T}(undef, N)
553553
for i in 1:N
554554
x[i] ~ Normal(0, sqrt(4))
@@ -563,7 +563,7 @@ using Turing
563563
vdemo1kw(; T) = vdemo1(T)
564564
sample(StableRNG(seed), vdemo1kw(; T=DynamicPPL.TypeWrap{Float64}()), alg, 10)
565565

566-
@model function vdemo2(::Type{T}=Float64) where {T<:Real}
566+
@model function vdemo2((::Type{T})=Float64) where {T<:Real}
567567
x = Vector{T}(undef, N)
568568
@. x ~ Normal(0, 2)
569569
end
@@ -574,7 +574,7 @@ using Turing
574574
vdemo2kw(; T) = vdemo2(T)
575575
sample(StableRNG(seed), vdemo2kw(; T=DynamicPPL.TypeWrap{Float64}()), alg, 10)
576576

577-
@model function vdemo3(::Type{TV}=Vector{Float64}) where {TV<:AbstractVector}
577+
@model function vdemo3((::Type{TV})=Vector{Float64}) where {TV<:AbstractVector}
578578
x = TV(undef, N)
579579
@. x ~ InverseGamma(2, 3)
580580
end

test/mcmc/gibbs.jl

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -141,8 +141,9 @@ end
141141
inner::Alg
142142
end
143143

144-
unwrap_sampler(sampler::DynamicPPL.Sampler{<:AlgWrapper}) =
145-
DynamicPPL.Sampler(sampler.alg.inner)
144+
unwrap_sampler(sampler::DynamicPPL.Sampler{<:AlgWrapper}) = DynamicPPL.Sampler(
145+
sampler.alg.inner
146+
)
146147

147148
# Methods we need to define to be able to use AlgWrapper instead of an actual algorithm.
148149
# They all just propagate the call to the inner algorithm.
@@ -201,7 +202,7 @@ end
201202
end
202203

203204
# A test model that includes several different kinds of tilde syntax.
204-
@model function test_model(val, ::Type{M}=Vector{Float64}) where {M}
205+
@model function test_model(val, (::Type{M})=Vector{Float64}) where {M}
205206
s ~ Normal(0.1, 0.2)
206207
m ~ Poisson()
207208
val ~ Normal(s, 1)
@@ -508,7 +509,7 @@ end
508509
end
509510

510511
@testset "dynamic model" begin
511-
@model function imm(y, alpha, ::Type{M}=Vector{Float64}) where {M}
512+
@model function imm(y, alpha, (::Type{M})=Vector{Float64}) where {M}
512513
N = length(y)
513514
rpm = DirichletProcess(alpha)
514515

@@ -574,7 +575,7 @@ end
574575

575576
@testset "dynamic model with dot tilde" begin
576577
@model function dynamic_model_with_dot_tilde(
577-
num_zs=10, ::Type{M}=Vector{Float64}
578+
num_zs=10, (::Type{M})=Vector{Float64}
578579
) where {M}
579580
z = Vector{Int}(undef, num_zs)
580581
z .~ Poisson(1.0)
@@ -720,7 +721,7 @@ end
720721
struct Wrap{T}
721722
a::T
722723
end
723-
@model function model1(::Type{T}=Float64) where {T}
724+
@model function model1((::Type{T})=Float64) where {T}
724725
x = Vector{T}(undef, 1)
725726
x[1] ~ Normal()
726727
y = Wrap{T}(0.0)

test/mcmc/hmc.jl

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -107,9 +107,8 @@ using Turing
107107
w13 ~ MvNormal([0.0; 0.0], [var_prior 0.0; 0.0 var_prior])
108108
bo ~ Normal(0, var_prior)
109109

110-
wo ~ MvNormal(
111-
[0.0; 0; 0], [var_prior 0.0 0.0; 0.0 var_prior 0.0; 0.0 0.0 var_prior]
112-
)
110+
wo ~
111+
MvNormal([0.0; 0; 0], [var_prior 0.0 0.0; 0.0 var_prior 0.0; 0.0 0.0 var_prior])
113112
for i in rand(1:N, 10)
114113
y = nn(xs[i], b1, w11, w12, w13, bo, wo)
115114
ts[i] ~ Bernoulli(y)
@@ -215,7 +214,7 @@ using Turing
215214
end
216215

217216
@testset "(partially) issue: #2095" begin
218-
@model function vector_of_dirichlet(::Type{TV}=Vector{Float64}) where {TV}
217+
@model function vector_of_dirichlet((::Type{TV})=Vector{Float64}) where {TV}
219218
xs = Vector{TV}(undef, 2)
220219
xs[1] ~ Dirichlet(ones(5))
221220
return xs[2] ~ Dirichlet(ones(5))

test/optimisation/Optimisation.jl

Lines changed: 15 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -33,8 +33,9 @@ using Turing
3333
end
3434
DynamicPPL.NodeTrait(::OverrideContext) = DynamicPPL.IsParent()
3535
DynamicPPL.childcontext(parent::OverrideContext) = parent.context
36-
DynamicPPL.setchildcontext(parent::OverrideContext, child) =
37-
OverrideContext(child, parent.logprior_weight, parent.loglikelihood_weight)
36+
DynamicPPL.setchildcontext(parent::OverrideContext, child) = OverrideContext(
37+
child, parent.logprior_weight, parent.loglikelihood_weight
38+
)
3839

3940
# Only implement what we need for the models above.
4041
function DynamicPPL.tilde_assume(context::OverrideContext, right, vn, vi)
@@ -215,8 +216,9 @@ using Turing
215216
Random.seed!(222)
216217
true_value = [0.0625, 1.75]
217218
true_logp = loglikelihood(gdemo_default, (s=true_value[1], m=true_value[2]))
218-
check_success(result, check_retcode=true) =
219-
check_optimisation_result(result, true_value, true_logp, check_retcode)
219+
check_success(result, check_retcode=true) = check_optimisation_result(
220+
result, true_value, true_logp, check_retcode
221+
)
220222

221223
lb = [0.0, 0.0]
222224
ub = [2.0, 2.0]
@@ -275,8 +277,9 @@ using Turing
275277
Random.seed!(222)
276278
true_value = [49 / 54, 7 / 6]
277279
true_logp = logjoint(gdemo_default, (s=true_value[1], m=true_value[2]))
278-
check_success(result, check_retcode=true) =
279-
check_optimisation_result(result, true_value, true_logp, check_retcode)
280+
check_success(result, check_retcode=true) = check_optimisation_result(
281+
result, true_value, true_logp, check_retcode
282+
)
280283

281284
lb = [0.0, 0.0]
282285
ub = [2.0, 2.0]
@@ -336,8 +339,9 @@ using Turing
336339
Random.seed!(222)
337340
true_value = [0.0625, 1.75]
338341
true_logp = loglikelihood(gdemo_default, (s=true_value[1], m=true_value[2]))
339-
check_success(result, check_retcode=true) =
340-
check_optimisation_result(result, true_value, true_logp, check_retcode)
342+
check_success(result, check_retcode=true) = check_optimisation_result(
343+
result, true_value, true_logp, check_retcode
344+
)
341345

342346
# Set two constraints: The first parameter must be non-negative, and the L2 norm
343347
# of the parameters must be between 0.5 and 2.
@@ -389,8 +393,9 @@ using Turing
389393
Random.seed!(222)
390394
true_value = [49 / 54, 7 / 6]
391395
true_logp = logjoint(gdemo_default, (s=true_value[1], m=true_value[2]))
392-
check_success(result, check_retcode=true) =
393-
check_optimisation_result(result, true_value, true_logp, check_retcode)
396+
check_success(result, check_retcode=true) = check_optimisation_result(
397+
result, true_value, true_logp, check_retcode
398+
)
394399

395400
# Set two constraints: The first parameter must be non-negative, and the L2 norm
396401
# of the parameters must be between 0.5 and 2.

0 commit comments

Comments (0)