Skip to content

Commit

Permalink
use default_rng when comparing acceleration methods
Browse files Browse the repository at this point in the history
  • Loading branch information
ablaom committed May 1, 2024
1 parent c806dc5 commit 99100b8
Show file tree
Hide file tree
Showing 4 changed files with 6 additions and 8 deletions.
3 changes: 1 addition & 2 deletions test/classifier.jl
Original file line number Diff line number Diff line change
Expand Up @@ -62,9 +62,8 @@ losses = []
# check flux model is an improvement on predicting constant
# distribution
# (GPUs only support `default_rng`):
-rng = accel == CPU1() ? StableRNGs.StableRNG(123) : Random.default_rng()
+rng = Random.default_rng()
seed!(rng, 123)
-rng = StableRNGs.StableRNG(123)
model = MLJFlux.NeuralNetworkClassifier(epochs=50,
builder=builder,
optimiser=optimiser,
Expand Down
6 changes: 3 additions & 3 deletions test/image.jl
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ losses = []
@testset_accelerated "ImageClassifier basic tests" accel begin

# GPUs only support `default_rng`:
-rng = accel == CPU1() ? StableRNGs.StableRNG(123) : Random.default_rng()
+rng = Random.default_rng()
seed!(rng, 123)

model = MLJFlux.ImageClassifier(builder=builder,
Expand Down Expand Up @@ -86,7 +86,7 @@ losses = []
@testset_accelerated "ColorImages" accel begin

# GPUs only support `default_rng`:
-rng = accel == CPU1() ? StableRNGs.StableRNG(123) : Random.default_rng()
+rng = Random.default_rng()
seed!(rng, 123)

model = MLJFlux.ImageClassifier(builder=builder,
Expand Down Expand Up @@ -129,7 +129,7 @@ noise=0.2, color=true);
@testset_accelerated "ImageClassifier basic tests" accel begin

# GPUs only support `default_rng`:
-rng = accel == CPU1() ? StableRNGs.StableRNG(123) : Random.default_rng()
+rng = Random.default_rng()
seed!(rng, 123)

model = MLJFlux.ImageClassifier(epochs=5,
Expand Down
4 changes: 2 additions & 2 deletions test/regressor.jl
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ train, test = MLJBase.partition(1:N, 0.7)

# test model is a bit better than constant predictor:
# (GPUs only support `default_rng`):
-rng = accel == CPU1() ? StableRNGs.StableRNG(123) : Random.default_rng()
+rng = Random.default_rng()
seed!(rng, 123)
model = MLJFlux.NeuralNetworkRegressor(builder=builder,
acceleration=accel,
Expand Down Expand Up @@ -108,7 +108,7 @@ losses = []

# test model is a bit better than constant predictor
# (GPUs only support `default_rng`):
-rng = accel == CPU1() ? StableRNGs.StableRNG(123) : Random.default_rng()
+rng = Random.default_rng()
seed!(rng, 123)
model = MLJFlux.MultitargetNeuralNetworkRegressor(
acceleration=accel,
Expand Down
1 change: 0 additions & 1 deletion test/test_utils.jl
Original file line number Diff line number Diff line change
Expand Up @@ -164,7 +164,6 @@ function optimisertest(ModelType, X, y, builder, optimiser, accel)
@test isapprox(l1, l2)
else
@test_broken isapprox(l1, l2, rtol=1e-8)
-@show l1/l2
end

# USING USER SPECIFIED RNG SEED (unsupported on GPU)
Expand Down

0 comments on commit 99100b8

Please sign in to comment.