Skip to content

Commit

Permalink
Stop doing sparse stuff in this package (#213)
Browse files Browse the repository at this point in the history
* Bump minor version

* Fix comment

* Allow AbstractGPs@0.4

* Remove elbo from core of package

* Remove elbo-related tests

* Update NEWS.md

* Only bump patch

* Update implementation for SparseGP object

* Remove AbstractGPs@0.3 from compat
  • Loading branch information
willtebbutt authored Aug 26, 2021
1 parent 577fce7 commit 6818fe4
Show file tree
Hide file tree
Showing 10 changed files with 24 additions and 168 deletions.
6 changes: 6 additions & 0 deletions NEWS.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,12 @@ between versions, and discuss new features.
If you find a breaking change that is not reported here, please either raise an issue or
make a PR to amend this document.

## 0.7.14

AbstractGPs now takes care of everything sparsity-related.
Consequently, Stheno no longer tests anything ELBO-related, and the functionality you get
will depend entirely upon which version of AbstractGPs you're using.

## 0.7.0

### Breaking changes
Expand Down
4 changes: 2 additions & 2 deletions Project.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
name = "Stheno"
uuid = "8188c328-b5d6-583d-959b-9690869a5511"
version = "0.7.13"
version = "0.7.14"

[deps]
AbstractGPs = "99985d1d-32ba-4be9-9821-2ec096f28918"
Expand All @@ -17,7 +17,7 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
ZygoteRules = "700de1a5-db45-46bc-99cf-38207098b444"

[compat]
AbstractGPs = "0.3.9"
AbstractGPs = "0.4"
BlockArrays = "0.15, 0.16"
ChainRulesCore = "1"
FillArrays = "0.7, 0.8, 0.9, 0.10, 0.11, 0.12"
Expand Down
1 change: 0 additions & 1 deletion src/Stheno.jl
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,6 @@ module Stheno
include("deprecate.jl")

export wrap, BlockData, GPC, GPPPInput, @gppp
export elbo, dtc
export ∘, select, stretch, periodic, shift
export cov_diag, mean_and_cov_diag
end # module
2 changes: 1 addition & 1 deletion src/composite/cross.jl
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ end


#
# Util for multi-process versions of `rand`, `logpdf`, and `elbo`.
# Build a single FiniteGP from a collection of FiniteGPs.
#

function finites_to_block(fs::AV{<:FiniteGP})
Expand Down
10 changes: 7 additions & 3 deletions src/gaussian_process_probabilistic_programme.jl
Original file line number Diff line number Diff line change
Expand Up @@ -177,11 +177,15 @@ f = @gppp let
f3 = f1 + f2
end
x = GPPPInput(:f3, randn(5))
x_local = randn(5)
y = rand(f(x, 0.1))
x = BlockData(GPPPInput(:f1, x_local), GPPPInput(:f2, x_local), GPPPInput(:f3, x_local))
logpdf(f(x, 0.1), y) ≈ elbo(f(x, 0.1), y, f(x, 1e-9))
y = rand(f(x, 1e-12))
f1, f2, f3 = split(x, y)
isapprox(f1 + f2, f3; rtol=1e-4)
# output
Expand Down
6 changes: 3 additions & 3 deletions src/sparse_finite_gp.jl
Original file line number Diff line number Diff line change
Expand Up @@ -55,14 +55,14 @@ rand(f::SparseFiniteGP, N::Int) = rand(Random.GLOBAL_RNG, f, N)
rand(rng::AbstractRNG, f::SparseFiniteGP) = vec(rand(rng, f, 1))
rand(f::SparseFiniteGP) = vec(rand(f, 1))

elbo(f::SparseFiniteGP, y::AV{<:Real}) = elbo(f.fobs, y, f.finducing)
elbo(f::SparseFiniteGP, y::AV{<:Real}) = elbo(VFE(f.finducing), f.fobs, y)

logpdf(f::SparseFiniteGP, y::AV{<:Real}) = elbo(f.fobs, y, f.finducing)
logpdf(f::SparseFiniteGP, y::AV{<:Real}) = elbo(VFE(f.finducing), f.fobs, y)

function logpdf(f::SparseFiniteGP, Y::AbstractMatrix{<:Real})
return map(y -> logpdf(f, y), eachcol(Y))
end

function posterior(f::SparseFiniteGP, y::AbstractVector{<:Real})
return approx_posterior(AbstractGPs.VFE(), f.fobs, y, f.finducing)
return posterior(AbstractGPs.VFE(f.finducing), f.fobs, y)
end
80 changes: 0 additions & 80 deletions test/abstract_gp.jl
Original file line number Diff line number Diff line change
Expand Up @@ -73,85 +73,6 @@ end
atol=1e-9, rtol=1e-9,
)
end
@testset "logpdf / elbo / dtc" begin
rng, N, S, σ, gpc = MersenneTwister(123456), 10, 11, 1e-1, GPC()
x = collect(range(-3.0, stop=3.0, length=N))
f = wrap(GP(1, SEKernel()), gpc)
fx, y = FiniteGP(f, x, 0), FiniteGP(f, x, σ^2)
ŷ = rand(rng, y)

# Check that logpdf returns the correct type and roughly agrees with Distributions.
@test logpdf(y, ŷ) isa Real
@test logpdf(y, ŷ) ≈ logpdf(MvNormal(Vector(mean(y)), cov(y)), ŷ)

# Check that multi-sample logpdf returns the correct type and is consistent with
# single-sample logpdf
Ŷ = rand(rng, y, S)
@test logpdf(y, Ŷ) isa Vector{Float64}
@test logpdf(y, Ŷ) ≈ [logpdf(y, Ŷ[:, n]) for n in 1:S]

# Check gradient of logpdf at mean is zero for `f`.
adjoint_test(ŷ->logpdf(fx, ŷ), 1, ones(size(ŷ)))
lp, back = Zygote.pullback(ŷ->logpdf(fx, ŷ), ones(size(ŷ)))
@test back(randn(rng))[1] == zeros(size(ŷ))

# Check that gradient of logpdf at mean is zero for `y`.
adjoint_test(ŷ->logpdf(y, ŷ), 1, ones(size(ŷ)))
lp, back = Zygote.pullback(ŷ->logpdf(y, ŷ), ones(size(ŷ)))
@test back(randn(rng))[1] == zeros(size(ŷ))

# Check that gradient w.r.t. inputs is approximately correct for `f`.
x, l̄ = randn(rng, N), randn(rng)
adjoint_test(
x->logpdf(FiniteGP(f, x, 1e-3), ones(size(x))),
l̄, collect(x);
atol=1e-8, rtol=1e-8,
)
adjoint_test(
x->sum(logpdf(FiniteGP(f, x, 1e-3), ones(size(Ŷ)))),
l̄, collect(x);
atol=1e-8, rtol=1e-8,
)

# Check that the gradient w.r.t. the noise is approximately correct for `f`.
σ_ = randn(rng)
adjoint_test((σ_, ŷ)->logpdf(FiniteGP(f, x, exp(σ_)), ŷ), l̄, σ_, ŷ)
adjoint_test((σ_, Ŷ)->sum(logpdf(FiniteGP(f, x, exp(σ_)), Ŷ)), l̄, σ_, Ŷ)

# Check that the gradient w.r.t. a scaling of the GP works.
adjoint_test(
α->logpdf(FiniteGP(α * f, x, 1e-1), ŷ), l̄, randn(rng);
atol=1e-8, rtol=1e-8,
)
adjoint_test(
α->sum(logpdf(FiniteGP(α * f, x, 1e-1), Ŷ)), l̄, randn(rng);
atol=1e-8, rtol=1e-8,
)

# Ensure that the elbo is close to the logpdf when appropriate.
@test elbo(y, ŷ, fx) isa Real
@test elbo(y, ŷ, fx) ≈ logpdf(y, ŷ)
@test elbo(y, ŷ, y) < logpdf(y, ŷ)
@test elbo(y, ŷ, FiniteGP(f, x, 2 * σ^2)) < elbo(y, ŷ, y)

# Check adjoint w.r.t. elbo is correct.
adjoint_test(
(x, ŷ, σ)->elbo(FiniteGP(f, x, σ^2), ŷ, FiniteGP(f, x, 0)),
randn(rng), x, ŷ, σ;
atol=1e-6, rtol=1e-6,
)

# Ensure that the dtc is close to the logpdf when appropriate.
@test dtc(y, ŷ, fx) isa Real
@test dtc(y, ŷ, fx) ≈ logpdf(y, ŷ)

# Check adjoint w.r.t. dtc is correct.
adjoint_test(
(x, ŷ, σ)->dtc(FiniteGP(f, x, σ^2), ŷ, FiniteGP(f, x, 0)),
randn(rng), x, ŷ, σ;
atol=1e-6, rtol=1e-6,
)
end
@testset "Type Stability - $T" for T in [Float64, Float32]
rng = MersenneTwister(123456)
x = randn(rng, T, 123)
Expand All @@ -164,6 +85,5 @@ end
y = rand(rng, fx)
@test y isa Vector{T}
@test logpdf(fx, y) isa T
@test elbo(fx, y, u) isa T
end
end
53 changes: 0 additions & 53 deletions test/composite/addition.jl
Original file line number Diff line number Diff line change
Expand Up @@ -89,56 +89,3 @@
)
end
end

# # θ = Dict(:l1=>0.5, :l2=>2.3);
# x, z = collect(range(-5.0, 5.0; length=512)), collect(range(-5.0, 5.0; length=128));
# y = rand(GP(sin, SqExponentialKernel(), GPC())(x, 0.1));

# foo_logpdf = (x, y) -> begin
# gpc = GPC()
# f = GP(sin, SqExponentialKernel(), gpc)
# return logpdf(f(x, 0.1), y)
# end

# foo_elbo = (x, y, z) -> begin
# f = GP(0, SqExponentialKernel(), GPC())
# return elbo(f(x, 0.1), y, f(z, 0.001))
# end

# @benchmark foo_logpdf($x, $y)
# @benchmark Zygote.pullback(foo_logpdf, $x, $y)

# let
# z, back = Zygote.pullback(foo_logpdf, x, y)
# @benchmark $back($(randn()))
# end

# let
# foo = function(x, y)
# fx = GP(0, SqExponentialKernel(), GPC())(x, 0.1)
# C = cholesky(Symmetric(cov(fx)))
# return logdet(C) + Xt_invA_X(C, y)
# end
# display(@benchmark Zygote.pullback($foo, $x, $y))
# z_pw, back_pw = Zygote.pullback(foo, x, y)
# @benchmark $back_pw(randn())
# end


# @benchmark foo_elbo($x, $y, $z)
# @benchmark Zygote.pullback(foo_elbo, $x, $y, $z)

# let
# L, back = Zygote.pullback(foo_elbo, x, y, z)
# @benchmark $back($L)
# end


# θ->begin
# gpc = GPC()
# f1 = GP(sin, SqExponentialKernel(l=θ[:l1]), gpc)
# f2 = GP(cos, SqExponentialKernel(l=θ[:l2]), gpc)
# f3 = f1 + f2
# return f3, f3
# end,
# 13, 11,
26 changes: 3 additions & 23 deletions test/composite/test_util.jl
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
Some basic consistency checks for the function `f(θ)::Tuple{GP, GP}`. Mainly just checks
that Zygote works properly for `f`, and correctly derives the gradients w.r.t. `θ` for
`rand`, `logpdf`, `elbo` when considering the f.d.d.s `f(x, Σ)` and observations `y`, where
`rand`, `logpdf`, when considering the f.d.d.s `f(x, Σ)` and observations `y`, where
`Σ = _to_psd(A)`. The first output of `f` will be the GP sampled from and whose `logpdf`
will be computed, while the second will be used as the process for the pseudo-points, whose
inputs are `z`.
Expand Down Expand Up @@ -46,7 +46,7 @@ function check_consistency(rng::AbstractRNG, θ, f, x::AV, y::AV, A, z::AV, B)


# #
# # rand / logpdf / elbo tests
# # rand / logpdf tests
# #

# # Check that the gradient w.r.t. the samples is correct (single-sample).
Expand All @@ -69,19 +69,8 @@ function check_consistency(rng::AbstractRNG, θ, f, x::AV, y::AV, A, z::AV, B)
# rtol=1e-4, atol=1e-4,
# )

# # Check adjoint for elbo.
# adjoint_test(
# (ϴ, x, A, y, z, B)->begin
# fx, uz = h(θ, x, A, z, B)
# return elbo(fx, y, uz)
# end,
# randn(rng), θ, x, A, y, z, B;
# rtol=1e-4, atol=1e-4,
# )


# #
# # multi-process rand / logpdf / elbo tests - this stuff won't work for anything if
# # multi-process rand / logpdf tests - this stuff won't work for anything if
# # cross-related functionality doesn't work properly
# #

Expand Down Expand Up @@ -120,15 +109,6 @@ function check_consistency(rng::AbstractRNG, θ, f, x::AV, y::AV, A, z::AV, B)
# randn(rng), θ, x, A, y;
# rtol=1e-4, atol=1e-4,
# )

# adjoint_test(
# (ϴ, x, A, y, z, B)->begin
# fx, uz = h(θ, x, A, z, B)
# return elbo([fx, fx], [y, y], [uz, uz])
# end,
# randn(rng), θ, x, A, y, z, B;
# rtol=1e-4, atol=1e-4,
# )
end

function standard_1D_dense_test(rng::AbstractRNG, θ, f, x::AV, z::AV)
Expand Down
4 changes: 2 additions & 2 deletions test/sparse_finite_gp.jl
Original file line number Diff line number Diff line change
Expand Up @@ -30,12 +30,12 @@
fxu = SparseFiniteGP(f(x, σ), f(xu, σu))
y = rand(MersenneTwister(12345), fxu)

fpost1 = approx_posterior(VFE(), fxu.fobs, y, fxu.finducing)
fpost1 = posterior(VFE(fxu.finducing), fxu.fobs, y)
fpost2 = posterior(fxu, y)

@test marginals(fpost1(x)) == marginals(fpost2(x))
@test elbo(fxu, y) == logpdf(fxu, y)
@test logpdf(fxu, y) == elbo(fxu.fobs, y, fxu.finducing)
@test logpdf(fxu, y) == elbo(VFE(fxu.finducing),fxu.fobs, y)
yy = rand(fxu, 10)
@test all(logpdf(fx, yy) .> logpdf(fxu, yy))
end
Expand Down

2 comments on commit 6818fe4

@willtebbutt
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@JuliaRegistrator register()

@JuliaRegistrator
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Registration pull request created: JuliaRegistries/General/43612

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the github interface, or via:

git tag -a v0.7.14 -m "<description of version>" 6818fe4a3d5ac3519d57060a6c4694a80647ee29
git push origin v0.7.14

Please sign in to comment.