Replace broadcasting over distributions with broadcasting with partially applied functions #1818

Merged
merged 2 commits on Jan 5, 2024
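
The change throughout is mechanical: instead of broadcasting a function over a `Ref`-wrapped distribution (or relying on the distribution broadcasting like a scalar), the distribution is fixed as the first argument with `Base.Fix1` and the resulting one-argument callable is broadcast or `map`ped over the data. A minimal sketch of the equivalence (the `Normal` example below is illustrative, not part of the diff):

```julia
using Distributions

d = Normal(0.0, 1.0)
x = [-1.0, 0.0, 1.0]

# Old pattern: wrap the distribution in `Ref` so broadcasting treats it as a scalar.
old = pdf.(Ref(d), x)

# New pattern: partially apply `pdf` to the distribution, then broadcast (or `map`)
# the resulting single-argument function over the data.
new = Base.Fix1(pdf, d).(x)     # same values as map(Base.Fix1(pdf, d), x)

old == new   # true — both evaluate pdf(d, xi) elementwise
```
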
8 changes: 4 additions & 4 deletions src/deprecates.jl
@@ -34,13 +34,13 @@ for fun in [:pdf, :logpdf,
 fun! = Symbol(fun, '!')

 @eval begin
-@deprecate ($_fun!)(r::AbstractArray{<:Real}, d::UnivariateDistribution, X::AbstractArray{<:Real}) r .= ($fun).(d, X) false
-@deprecate ($fun!)(r::AbstractArray{<:Real}, d::UnivariateDistribution, X::AbstractArray{<:Real}) r .= ($fun).(d, X) false
-@deprecate ($fun)(d::UnivariateDistribution, X::AbstractArray{<:Real}) ($fun).(d, X)
+@deprecate ($_fun!)(r::AbstractArray{<:Real}, d::UnivariateDistribution, X::AbstractArray{<:Real}) r .= Base.Fix1($fun, d).(X) false
+@deprecate ($fun!)(r::AbstractArray{<:Real}, d::UnivariateDistribution, X::AbstractArray{<:Real}) r .= Base.Fix1($fun, d).(X) false
+@deprecate ($fun)(d::UnivariateDistribution, X::AbstractArray{<:Real}) map(Base.Fix1($fun, d), X)
 end
 end

-@deprecate pdf(d::DiscreteUnivariateDistribution) pdf.(Ref(d), support(d))
+@deprecate pdf(d::DiscreteUnivariateDistribution) map(Base.Fix1(pdf, d), support(d))

 # Wishart constructors
 @deprecate Wishart(df::Real, S::AbstractPDMat, warn::Bool) Wishart(df, S)
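
For context, a hedged sketch of what the rewritten deprecations forward to (the `Poisson` example is illustrative only):

```julia
using Distributions

d = Poisson(3.0)
X = 0:5

# The deprecated vectorized method pdf(d, X) now forwards to the mapped form:
probs = map(Base.Fix1(pdf, d), X)

# The deprecated in-place variant pdf!(r, d, X) forwards to a broadcast assignment:
r = similar(X, Float64)
r .= Base.Fix1(pdf, d).(X)
```
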
8 changes: 4 additions & 4 deletions src/mixtures/mixturemodel.jl
@@ -297,7 +297,7 @@ function _mixpdf!(r::AbstractArray, d::AbstractMixtureModel, x)
 pi = p[i]
 if pi > 0.0
 if d isa UnivariateMixture
-t .= pdf.(component(d, i), x)
+t .= Base.Fix1(pdf, component(d, i)).(x)
 else
 pdf!(t, component(d, i), x)
 end
@@ -326,7 +326,7 @@ function _mixlogpdf!(r::AbstractArray, d::AbstractMixtureModel, x)
 lp_i = view(Lp, :, i)
 # compute logpdf in batch and store
 if d isa UnivariateMixture
-lp_i .= logpdf.(component(d, i), x)
+lp_i .= Base.Fix1(logpdf, component(d, i)).(x)
 else
 logpdf!(lp_i, component(d, i), x)
 end
@@ -398,7 +398,7 @@ function _cwise_pdf!(r::AbstractMatrix, d::AbstractMixtureModel, X)
 size(r) == (n, K) || error("The size of r is incorrect.")
 for i = 1:K
 if d isa UnivariateMixture
-view(r,:,i) .= pdf.(Ref(component(d, i)), X)
+view(r,:,i) .= Base.Fix1(pdf, component(d, i)).(X)
 else
 pdf!(view(r,:,i),component(d, i), X)
 end
@@ -412,7 +412,7 @@ function _cwise_logpdf!(r::AbstractMatrix, d::AbstractMixtureModel, X)
 size(r) == (n, K) || error("The size of r is incorrect.")
 for i = 1:K
 if d isa UnivariateMixture
-view(r,:,i) .= logpdf.(Ref(component(d, i)), X)
+view(r,:,i) .= Base.Fix1(logpdf, component(d, i)).(X)
 else
 logpdf!(view(r,:,i), component(d, i), X)
 end
2 changes: 1 addition & 1 deletion src/multivariate/jointorderstatistics.jl
@@ -162,7 +162,7 @@ function _rand!(rng::AbstractRNG, d::JointOrderStatistics, x::AbstractVector{<:R
 else
 s += randexp(rng, T)
 end
-x .= quantile.(d.dist, x ./ s)
+x .= Base.Fix1(quantile, d.dist).(x ./ s)
 end
 return x
 end
4 changes: 2 additions & 2 deletions src/qq.jl
@@ -35,14 +35,14 @@ function qqbuild(x::AbstractVector, d::UnivariateDistribution)
 n = length(x)
 grid = ppoints(n)
 qx = quantile(x, grid)
-qd = quantile.(Ref(d), grid)
+qd = map(Base.Fix1(quantile, d), grid)
 return QQPair(qx, qd)
 end

 function qqbuild(d::UnivariateDistribution, x::AbstractVector)
 n = length(x)
 grid = ppoints(n)
-qd = quantile.(Ref(d), grid)
+qd = map(Base.Fix1(quantile, d), grid)
 qx = quantile(x, grid)
 return QQPair(qd, qx)
 end
2 changes: 1 addition & 1 deletion src/univariate/continuous/uniform.jl
@@ -154,7 +154,7 @@ Base.:*(c::Real, d::Uniform) = Uniform(minmax(c * d.a, c * d.b)...)
 rand(rng::AbstractRNG, d::Uniform) = d.a + (d.b - d.a) * rand(rng)

 _rand!(rng::AbstractRNG, d::Uniform, A::AbstractArray{<:Real}) =
-A .= quantile.(d, rand!(rng, A))
+A .= Base.Fix1(quantile, d).(rand!(rng, A))


 #### Fitting
13 changes: 8 additions & 5 deletions src/univariate/discrete/betabinomial.jl
@@ -103,12 +103,15 @@ for f in (:ccdf, :logcdf, :logccdf)
 end
 end

-entropy(d::BetaBinomial) = entropy(Categorical(pdf.(Ref(d),support(d))))
-median(d::BetaBinomial) = median(Categorical(pdf.(Ref(d),support(d)))) - 1
-mode(d::BetaBinomial) = argmax(pdf.(Ref(d),support(d))) - 1
-modes(d::BetaBinomial) = modes(Categorical(pdf.(Ref(d),support(d)))) .- 1
+# Shifted categorical distribution corresponding to `BetaBinomial`
+_categorical(d::BetaBinomial) = Categorical(map(Base.Fix1(pdf, d), support(d)))

-quantile(d::BetaBinomial, p::Float64) = quantile(Categorical(pdf.(Ref(d), support(d))), p) - 1
+entropy(d::BetaBinomial) = entropy(_categorical(d))
+median(d::BetaBinomial) = median(_categorical(d)) - 1
+mode(d::BetaBinomial) = mode(_categorical(d)) - 1
+modes(d::BetaBinomial) = modes(_categorical(d)) .- 1
+
+quantile(d::BetaBinomial, p::Float64) = quantile(_categorical(d), p) - 1

 #### Sampling

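
The new `_categorical` helper collects the `BetaBinomial` probability masses over its support `0:n` into a `Categorical` on `1:n+1`, which is why the derived quantities above are shifted back by one. A small illustration (the parameters are chosen here only for the example):

```julia
using Distributions

d = BetaBinomial(4, 2.0, 3.0)                          # support(d) == 0:4
c = Categorical(map(Base.Fix1(pdf, d), support(d)))    # support(c) == 1:5

# With the definitions above, categorical quantities shift back by one:
mode(c) - 1 == mode(d)
quantile(c, 0.5) - 1 == quantile(d, 0.5)
```
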
2 changes: 1 addition & 1 deletion src/univariate/discrete/hypergeometric.jl
@@ -75,7 +75,7 @@ function kurtosis(d::Hypergeometric)
 a/b
 end

-entropy(d::Hypergeometric) = entropy(pdf.(Ref(d), support(d)))
+entropy(d::Hypergeometric) = entropy(map(Base.Fix1(pdf, d), support(d)))

 ### Evaluation & Sampling

9 changes: 6 additions & 3 deletions src/univariate/discrete/noncentralhypergeometric.jl
@@ -256,9 +256,12 @@ end
 Base.convert(::Type{WalleniusNoncentralHypergeometric{T}}, d::WalleniusNoncentralHypergeometric{T}) where {T<:Real} = d

 # Properties
-mean(d::WalleniusNoncentralHypergeometric) = sum(support(d) .* pdf.(Ref(d), support(d)))
-var(d::WalleniusNoncentralHypergeometric) = sum((support(d) .- mean(d)).^2 .* pdf.(Ref(d), support(d)))
-mode(d::WalleniusNoncentralHypergeometric) = support(d)[argmax(pdf.(Ref(d), support(d)))]
+function _discretenonparametric(d::WalleniusNoncentralHypergeometric)
+return DiscreteNonParametric(support(d), map(Base.Fix1(pdf, d), support(d)))
+end
+mean(d::WalleniusNoncentralHypergeometric) = mean(_discretenonparametric(d))
+var(d::WalleniusNoncentralHypergeometric) = var(_discretenonparametric(d))
+mode(d::WalleniusNoncentralHypergeometric) = mode(_discretenonparametric(d))

 entropy(d::WalleniusNoncentralHypergeometric) = 1

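
The same idea as for `BetaBinomial`: the moments of the Wallenius noncentral hypergeometric are delegated to a `DiscreteNonParametric` built from the support and its probability masses. A hedged sketch (parameters picked arbitrarily for illustration):

```julia
using Distributions

d = WalleniusNoncentralHypergeometric(8, 10, 6, 2.0)   # ns, nf, n, ω
dnp = DiscreteNonParametric(support(d), map(Base.Fix1(pdf, d), support(d)))

# mean, var, and mode of `d` now reduce to those of this weighted point-mass distribution:
mean(dnp), var(dnp), mode(dnp)
```
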
8 changes: 4 additions & 4 deletions test/censored.jl
@@ -207,7 +207,7 @@ end
 end
 @test @inferred(median(d)) ≈ clamp(median(d0), l, u)
 @inferred quantile(d, 0.5)
-@test quantile.(d, 0:0.01:1) ≈ clamp.(quantile.(d0, 0:0.01:1), l, u)
+@test Base.Fix1(quantile, d).(0:0.01:1) ≈ clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u)
 # special-case pdf/logpdf/loglikelihood since when replacing Dirac(μ) with
 # Normal(μ, 0), they are infinite
 if lower === nothing || !isfinite(lower)
@@ -253,7 +253,7 @@ end
 @test f(d) ≈ f(dmix)
 end
 @test median(d) ≈ clamp(median(d0), l, u)
-@test quantile.(d, 0:0.01:1) ≈ clamp.(quantile.(d0, 0:0.01:1), l, u)
+@test Base.Fix1(quantile, d).(0:0.01:1) ≈ clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u)
 # special-case pdf/logpdf/loglikelihood since when replacing Dirac(μ) with
 # Normal(μ, 0), they are infinite
 if lower === nothing
@@ -311,7 +311,7 @@ end
 end
 @test @inferred(median(d)) ≈ clamp(median(d0), l, u)
 @inferred quantile(d, 0.5)
-@test quantile.(d, 0:0.01:1) ≈ clamp.(quantile.(d0, 0:0.01:1), l, u)
+@test Base.Fix1(quantile, d).(0:0.01:1) ≈ clamp.(Base.Fix1(quantile, d0).(0:0.01:1), l, u)
 # rand
 x = rand(d, 10_000)
 @test all(x -> insupport(d, x), x)
@@ -346,7 +346,7 @@ end
 @test f(d, 5) ≈ f(dmix, 5)
 end
 @test median(d) ≈ clamp(median(d0), l, u)
-@test quantile.(d, 0:0.01:0.99) ≈ clamp.(quantile.(d0, 0:0.01:0.99), l, u)
+@test Base.Fix1(quantile, d).(0:0.01:0.99) ≈ clamp.(Base.Fix1(quantile, d0).(0:0.01:0.99), l, u)
 x = rand(d, 100)
 @test loglikelihood(d, x) ≈ loglikelihood(dmix, x)
 # rand
2 changes: 1 addition & 1 deletion test/matrixvariates.jl
@@ -117,7 +117,7 @@ function test_convert(d::MatrixDistribution)
 @test d == deepcopy(d)
 for elty in (Float32, Float64, BigFloat)
 del1 = convert(distname{elty}, d)
-del2 = convert(distname{elty}, getfield.(Ref(d), fieldnames(typeof(d)))...)
+del2 = convert(distname{elty}, (Base.Fix1(getfield, d)).(fieldnames(typeof(d)))...)
 @test del1 isa distname{elty}
 @test del2 isa distname{elty}
 @test partype(del1) == elty
2 changes: 1 addition & 1 deletion test/mixture.jl
@@ -40,7 +40,7 @@ function test_mixture(g::UnivariateMixture, n::Int, ns::Int,
 for i = 1:n
 @test @inferred(cdf(g, X[i])) ≈ cf[i]
 end
-@test cdf.(g, X) ≈ cf
+@test Base.Fix1(cdf, g).(X) ≈ cf

 # evaluation
 P0 = zeros(T, n, K)
3 changes: 1 addition & 2 deletions test/multivariate/product.jl
@@ -67,8 +67,7 @@ end

 for a in ([0, 1], [-0.5, 0.5])
 # Construct independent distributions and `Product` distribution from these.
-support = fill(a, N)
-ds = DiscreteNonParametric.(support, Ref([0.5, 0.5]))
+ds = [DiscreteNonParametric(copy(a), [0.5, 0.5]) for _ in 1:N]
 x = rand.(ds)
 d_product = product_distribution(ds)
 @test d_product isa Product
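
One detail in the rewritten test construction: `fill(a, N)` repeats the same array object, whereas the comprehension gives every component its own copy of `a`. A small, self-contained sketch of that difference (not taken from the diff):

```julia
N = 3
a = [0, 1]

# `fill` does not copy: every element is the very same vector `a`.
shared = fill(a, N)
shared[1] === shared[2]        # true

# The comprehension used in the test hands each component an independent copy.
supports = [copy(a) for _ in 1:N]
supports[1] === supports[2]    # false
```
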
2 changes: 1 addition & 1 deletion test/product.jl
@@ -93,7 +93,7 @@ end

 for a in ([0, 1], [-0.5, 0.5])
 # Construct independent distributions and `ProductDistribution` from these.
-ds1 = DiscreteNonParametric.(fill(a, N), Ref([0.5, 0.5]))
+ds1 = [DiscreteNonParametric(copy(a), [0.5, 0.5]) for _ in 1:N]
 # Replace with
 # d_product1 = @inferred(product_distribution(ds1))
 # when `Product` is removed
46 changes: 23 additions & 23 deletions test/testutils.jl
@@ -128,7 +128,7 @@ function test_samples(s::Sampleable{Univariate, Discrete}, # the sampleable
 rmin = floor(Int,quantile(distr, 0.00001))::Int
 rmax = floor(Int,quantile(distr, 0.99999))::Int
 m = rmax - rmin + 1 # length of the range
-p0 = pdf.(Ref(distr), rmin:rmax) # reference probability masses
+p0 = map(Base.Fix1(pdf, distr), rmin:rmax) # reference probability masses
 @assert length(p0) == m

 # determine confidence intervals for counts:
@@ -233,7 +233,7 @@ function test_samples(s::Sampleable{Univariate, Continuous}, # the sampleable
 #
 clb = Vector{Int}(undef, nbins)
 cub = Vector{Int}(undef, nbins)
-cdfs = cdf.(Ref(distr), edges)
+cdfs = map(Base.Fix1(cdf, distr), edges)

 for i = 1:nbins
 pi = cdfs[i+1] - cdfs[i]
@@ -385,19 +385,19 @@ function test_range_evaluation(d::DiscreteUnivariateDistribution)
 rmin = round(Int, islowerbounded(d) ? vmin : quantile(d, 0.001))::Int
 rmax = round(Int, isupperbounded(d) ? vmax : quantile(d, 0.999))::Int

-p0 = pdf.(Ref(d), collect(rmin:rmax))
-@test pdf.(Ref(d), rmin:rmax) ≈ p0
+p0 = map(Base.Fix1(pdf, d), collect(rmin:rmax))
+@test map(Base.Fix1(pdf, d), rmin:rmax) ≈ p0
 if rmin + 2 <= rmax
-@test pdf.(Ref(d), rmin+1:rmax-1) ≈ p0[2:end-1]
+@test map(Base.Fix1(pdf, d), rmin+1:rmax-1) ≈ p0[2:end-1]
 end

 if isbounded(d)
-@test pdf.(Ref(d), support(d)) ≈ p0
-@test pdf.(Ref(d), rmin-2:rmax) ≈ vcat(0.0, 0.0, p0)
-@test pdf.(Ref(d), rmin:rmax+3) ≈ vcat(p0, 0.0, 0.0, 0.0)
-@test pdf.(Ref(d), rmin-2:rmax+3) ≈ vcat(0.0, 0.0, p0, 0.0, 0.0, 0.0)
+@test map(Base.Fix1(pdf, d), support(d)) ≈ p0
+@test map(Base.Fix1(pdf, d), rmin-2:rmax) ≈ vcat(0.0, 0.0, p0)
+@test map(Base.Fix1(pdf, d), rmin:rmax+3) ≈ vcat(p0, 0.0, 0.0, 0.0)
+@test map(Base.Fix1(pdf, d), rmin-2:rmax+3) ≈ vcat(0.0, 0.0, p0, 0.0, 0.0, 0.0)
 elseif islowerbounded(d)
-@test pdf.(Ref(d), rmin-2:rmax) ≈ vcat(0.0, 0.0, p0)
+@test map(Base.Fix1(pdf, d), rmin-2:rmax) ≈ vcat(0.0, 0.0, p0)
 end
 end

@@ -444,13 +444,13 @@ function test_evaluation(d::DiscreteUnivariateDistribution, vs::AbstractVector,
 end

 # consistency of scalar-based and vectorized evaluation
-@test pdf.(Ref(d), vs) ≈ p
-@test cdf.(Ref(d), vs) ≈ c
-@test ccdf.(Ref(d), vs) ≈ cc
+@test Base.Fix1(pdf, d).(vs) ≈ p
+@test Base.Fix1(cdf, d).(vs) ≈ c
+@test Base.Fix1(ccdf, d).(vs) ≈ cc

-@test logpdf.(Ref(d), vs) ≈ lp
-@test logcdf.(Ref(d), vs) ≈ lc
-@test logccdf.(Ref(d), vs) ≈ lcc
+@test Base.Fix1(logpdf, d).(vs) ≈ lp
+@test Base.Fix1(logcdf, d).(vs) ≈ lc
+@test Base.Fix1(logccdf, d).(vs) ≈ lcc
 end


@@ -511,15 +511,15 @@ function test_evaluation(d::ContinuousUnivariateDistribution, vs::AbstractVector

 # consistency of scalar-based and vectorized evaluation
 if !isa(d, StudentizedRange)
-@test pdf.(Ref(d), vs) ≈ p
-@test logpdf.(Ref(d), vs) ≈ lp
+@test Base.Fix1(pdf, d).(vs) ≈ p
+@test Base.Fix1(logpdf, d).(vs) ≈ lp
 end

-@test cdf.(Ref(d), vs) ≈ c
-@test ccdf.(Ref(d), vs) ≈ cc
+@test Base.Fix1(cdf, d).(vs) ≈ c
+@test Base.Fix1(ccdf, d).(vs) ≈ cc

-@test logcdf.(Ref(d), vs) ≈ lc
-@test logccdf.(Ref(d), vs) ≈ lcc
+@test Base.Fix1(logcdf, d).(vs) ≈ lc
+@test Base.Fix1(logccdf, d).(vs) ≈ lcc
 end

 function test_nonfinite(distr::UnivariateDistribution)
@@ -550,7 +550,7 @@ function test_stats(d::DiscreteUnivariateDistribution, vs::AbstractVector)
 # using definition (or an approximation)

 vf = Float64[v for v in vs]
-p = pdf.(Ref(d), vf)
+p = Base.Fix1(pdf, d).(vf)
 xmean = dot(p, vf)
 xvar = dot(p, abs2.(vf .- xmean))
 xstd = sqrt(xvar)
8 changes: 4 additions & 4 deletions test/univariate/continuous/johnsonsu.jl
@@ -10,10 +10,10 @@
 @test rand(d1) isa Float64

 @test median(d1) == quantile(d1, 0.5)
-x = quantile.(d1, [0.25, 0.45, 0.60, 0.80, 0.90])
-@test all(cdf.(d1, x) .≈ [0.25, 0.45, 0.60, 0.80, 0.90])
-y = cquantile.(d1, [0.25, 0.45, 0.60, 0.80, 0.90])
-@test all(ccdf.(d1, y) .≈ [0.25, 0.45, 0.60, 0.80, 0.90])
+x = Base.Fix1(quantile, d1).([0.25, 0.45, 0.60, 0.80, 0.90])
+@test all(Base.Fix1(cdf, d1).(x) .≈ [0.25, 0.45, 0.60, 0.80, 0.90])
+y = Base.Fix1(cquantile, d1).([0.25, 0.45, 0.60, 0.80, 0.90])
+@test all(Base.Fix1(ccdf, d1).(y) .≈ [0.25, 0.45, 0.60, 0.80, 0.90])

 @test mean(d1) ≈ 7.581281
 @test var(d1) ≈ 19.1969485
2 changes: 1 addition & 1 deletion test/univariate/continuous/logitnormal.jl
@@ -43,7 +43,7 @@ function test_logitnormal(g::LogitNormal, n_tsamples::Int=10^6,
 for i = 1:min(100, n_tsamples)
 @test logpdf(g, X[i]) ≈ log(pdf(g, X[i]))
 end
-@test logpdf.(g, X) ≈ log.(pdf.(g, X))
+@test Base.Fix1(logpdf, g).(X) ≈ log.(Base.Fix1(pdf, g).(X))
 @test isequal(logpdf(g, 0),-Inf)
 @test isequal(logpdf(g, 1),-Inf)
 @test isequal(logpdf(g, -eps()),-Inf)
10 changes: 5 additions & 5 deletions test/univariate/continuous/rician.jl
@@ -14,14 +14,14 @@
 @test var(d1) ≈ var(d2)
 @test mode(d1) ≈ mode(d2)
 @test median(d1) ≈ median(d2)
-@test quantile.(d1, [0.25, 0.45, 0.60, 0.80, 0.90]) ≈ quantile.(d2, [0.25, 0.45, 0.60, 0.80, 0.90])
-@test pdf.(d1, 0.0:0.1:1.0) ≈ pdf.(d2, 0.0:0.1:1.0)
-@test cdf.(d1, 0.0:0.1:1.0) ≈ cdf.(d2, 0.0:0.1:1.0)
+@test Base.Fix1(quantile, d1).([0.25, 0.45, 0.60, 0.80, 0.90]) ≈ Base.Fix1(quantile, d2).([0.25, 0.45, 0.60, 0.80, 0.90])
+@test Base.Fix1(pdf, d1).(0.0:0.1:1.0) ≈ Base.Fix1(pdf, d2).(0.0:0.1:1.0)
+@test Base.Fix1(cdf, d1).(0.0:0.1:1.0) ≈ Base.Fix1(cdf, d2).(0.0:0.1:1.0)

 d1 = Rician(10.0, 10.0)
 @test median(d1) == quantile(d1, 0.5)
-x = quantile.(d1, [0.25, 0.45, 0.60, 0.80, 0.90])
-@test all(cdf.(d1, x) .≈ [0.25, 0.45, 0.60, 0.80, 0.90])
+x = Base.Fix1(quantile, d1).([0.25, 0.45, 0.60, 0.80, 0.90])
+@test all(Base.Fix1(cdf, d1).(x) .≈ [0.25, 0.45, 0.60, 0.80, 0.90])

 x = rand(Rician(5.0, 5.0), 100000)
 d1 = fit(Rician, x)
2 changes: 1 addition & 1 deletion test/univariate/continuous/skewnormal.jl
@@ -25,7 +25,7 @@ import Distributions: normpdf, normcdf, normlogpdf, normlogcdf
 d4 = Normal(0.5, 2.2)
 #
 @test pdf(d3, 3.3) == Distributions.pdf(d4, 3.3)
-@test pdf.(d3, 1:3) == Distributions.pdf.(d4, 1:3)
+@test Base.Fix1(pdf, d3).(1:3) == Base.Fix1(pdf, d4).(1:3)
 a = mean(d3), var(d3), std(d3)
 b = Distributions.mean(d4), Distributions.var(d4), Distributions.std(d4)
 @test a == b
4 changes: 2 additions & 2 deletions test/univariate/discrete/binomial.jl
@@ -9,14 +9,14 @@ Random.seed!(1234)
 for (p, n) in [(0.6, 10), (0.8, 6), (0.5, 40), (0.04, 20), (1., 100), (0., 10), (0.999999, 1000), (1e-7, 1000)]
 d = Binomial(n, p)

-a = pdf.(d, 0:n)
+a = Base.Fix1(pdf, d).(0:n)
 for t=0:n
 @test pdf(d, t) ≈ a[1+t]
 end

 li = rand(0:n, 2)
 rng = minimum(li):maximum(li)
-b = pdf.(d, rng)
+b = Base.Fix1(pdf, d).(rng)
 for t in rng
 @test pdf(d, t) ≈ b[t - first(rng) + 1]
 end
4 changes: 2 additions & 2 deletions test/univariate/discrete/categorical.jl
@@ -56,8 +56,8 @@ for p in Any[
 @test iszero(ccdf(d, Inf))
 @test isnan(ccdf(d, NaN))

-@test pdf.(d, support(d)) == p
-@test pdf.(d, 1:k) == p
+@test Base.Fix1(pdf, d).(support(d)) == p
+@test Base.Fix1(pdf, d).(1:k) == p

 @test cf(d, 0) ≈ 1.0
 @test cf(d, 1) ≈ p' * cis.(1:length(p))
6 changes: 3 additions & 3 deletions test/univariate/discrete/poissonbinomial.jl
@@ -104,9 +104,9 @@ for (n₁, n₂, n₃, p₁, p₂, p₃) in [(10, 10, 10, 0.1, 0.5, 0.9),
 b2 = Binomial(n₂, p₂)
 b3 = Binomial(n₃, p₃)

-pmf1 = pdf.(b1, support(b1))
-pmf2 = pdf.(b2, support(b2))
-pmf3 = pdf.(b3, support(b3))
+pmf1 = Base.Fix1(pdf, b1).(support(b1))
+pmf2 = Base.Fix1(pdf, b2).(support(b2))
+pmf3 = Base.Fix1(pdf, b3).(support(b3))

 @test @inferred(mean(d)) ≈ (mean(b1) + mean(b2) + mean(b3))
 @test @inferred(var(d)) ≈ (var(b1) + var(b2) + var(b3))