
Commit: unbreake
CarloLucibello committed Oct 26, 2024
1 parent 1fb6f9d commit ae3539c
Showing 2 changed files with 12 additions and 10 deletions.
2 changes: 1 addition & 1 deletion Project.toml
@@ -48,7 +48,7 @@ ChainRulesCore = "1.12"
 Compat = "4.10.0"
 Enzyme = "0.12, 0.13"
 Functors = "0.4"
-MLDataDevices = "1.4.1"
+MLDataDevices = "1.4.2"
 MLUtils = "0.4"
 MPI = "0.20.19"
 MacroTools = "0.5"
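For context, Julia's [compat] entries use caret semantics by default: MLDataDevices = "1.4.2" allows any version in [1.4.2, 2.0.0), so this bump drops the previously-allowed 1.4.1 while keeping all later 1.x releases. A minimal sketch checking that with Pkg's spec parser (the Pkg.Versions location of semver_spec is an assumption here; it has moved between Pkg releases):

    using Pkg: Versions  # location of semver_spec assumed; it has moved across Pkg releases

    spec = Versions.semver_spec("1.4.2")  # caret bound: [1.4.2, 2.0.0)

    @assert v"1.4.2" in spec       # the new minimum is allowed
    @assert v"1.9.0" in spec       # later minor releases stay allowed
    @assert !(v"1.4.1" in spec)    # the old minimum is now excluded
    @assert !(v"2.0.0" in spec)    # the next breaking release is excluded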
20 changes: 11 additions & 9 deletions test/ext_cuda/cuda.jl
@@ -1,8 +1,10 @@
-using Flux, Test
+using Flux, Test, Zygote
 using Flux: cpu, gpu
 using Statistics: mean
-using LinearAlgebra: I, cholesky, Cholesky
+using LinearAlgebra: I, cholesky, Cholesky, Adjoint
+using SparseArrays: sparse, SparseMatrixCSC, AbstractSparseArray
 using CUDA
 CUDA.allowscalar(false)
+
 @testset "CUDA" begin
 x = randn(5, 5)
@@ -48,11 +50,11 @@ end
 # construct from CuArray
 x = [1, 3, 2]
 y = Flux.onehotbatch(x, 0:3)
-@test_skip begin # https://github.com/FluxML/OneHotArrays.jl/issues/16
+
+# https://github.com/FluxML/OneHotArrays.jl/issues/16
 y2 = Flux.onehotbatch(x |> gpu, 0:3)
 @test y2.indices isa CuArray
 @test y2 |> cpu == y
-end
 end

 @testset "onecold gpu" begin
@@ -104,12 +106,12 @@ end
 # Trivial functions
 @test gradient(x -> sum(abs, gpu(x)), a)[1] isa Matrix
 @test gradient(x -> sum(gpu(x)), a)[1] isa Matrix
-@test_skip gradient(x -> sum(gpu(x)), a')[1] isa Matrix # sum(::Adjoint{T,CuArray}) makes a Fill
+@test_broken gradient(x -> sum(gpu(x)), a')[1] isa Matrix # sum(::Adjoint{T,CuArray}) makes a Fill
 @test gradient(x -> sum(abs, cpu(x)), ca)[1] isa CuArray
 # This test should really not go through indirections and pull out Fills for efficiency
 # but we forcefully materialise. TODO: remove materialising CuArray here
 @test gradient(x -> sum(cpu(x)), ca)[1] isa CuArray # This involves FillArray, which should be GPU compatible
-@test gradient(x -> sum(cpu(x)), ca')[1] isa Adjoint{Float32, <:CuArray}
+@test gradient(x -> sum(cpu(x)), ca')[1] isa CuArray

 # Even more trivial: no movement
 @test gradient(x -> sum(abs, cpu(x)), a)[1] isa Matrix
@@ -131,8 +133,8 @@

 # Scalar indexing of an array, needs OneElement to transfer to GPU
 # https://github.com/FluxML/Zygote.jl/issues/1005
-@test gradient(x -> cpu(2 .* gpu(x))[1], Float32[1,2,3]) == ([2,0,0],)
-@test gradient(x -> cpu(gpu(x) * gpu(x))[1,2], Float32[1 2 3; 4 5 6; 7 8 9]) == ([2 6 8; 0 2 0; 0 3 0],)
+@test_broken gradient(x -> cpu(2 .* gpu(x))[1], Float32[1,2,3]) == ([2,0,0],)
+@test_broken gradient(x -> cpu(gpu(x) * gpu(x))[1,2], Float32[1 2 3; 4 5 6; 7 8 9]) == ([2 6 8; 0 2 0; 0 3 0],)
 end

@testset "gpu(x) and cpu(x) on structured arrays" begin
@@ -198,7 +200,7 @@ end
 post2 = Flux.DataLoader((x=X, y=Y); batchsize=7, shuffle=false) |> gpu
 for (p, q) in zip(pre2, post2)
 @test p.x == q.x
-@test_skip p.y == q.y # https://github.com/FluxML/OneHotArrays.jl/issues/28 -- MethodError: getindex(::OneHotArrays.OneHotMatrix{UInt32, CuArray{UInt32, 1, CUDA.Mem.DeviceBuffer}}, ::Int64, ::Int64) is ambiguous
+@test_broken p.y == q.y # https://github.com/FluxML/OneHotArrays.jl/issues/28 -- MethodError: getindex(::OneHotArrays.OneHotMatrix{UInt32, CuArray{UInt32, 1, CUDA.Mem.DeviceBuffer}}, ::Int64, ::Int64) is ambiguous
 end

 @test collect(pre2) isa Vector{<:NamedTuple{(:x, :y)}}
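For readers skimming the diff: the recurring change in this file is flipping @test_skip to @test_broken. Both record a Broken result, but they behave differently; a minimal self-contained sketch using only the Test standard library:

    using Test

    @testset "skip vs broken" begin
        # @test_skip records Broken without evaluating the expression at all:
        @test_skip sqrt(-1.0) == 0   # never run, so the DomainError never happens

        # @test_broken evaluates the expression and expects it to fail;
        # if it ever starts passing, the testset errors, prompting a cleanup:
        @test_broken 1 + 1 == 3      # fails as expected, recorded as Broken
    end

The switch means CI will flag these tests the moment the upstream fixes (the linked OneHotArrays and Zygote issues) land, instead of silently skipping them forever.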

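On the "sum(::Adjoint{T,CuArray}) makes a Fill" comment: Zygote represents the gradient of sum as a lazy FillArrays.Fill rather than a dense array, which is why these tests have to care about what cpu/gpu movement does to wrapper types. A small CPU-only illustration (assumes Zygote and FillArrays are available, as in this test environment):

    using Zygote
    using FillArrays: Fill

    # d(sum(x))/dx is all ones; Zygote returns it lazily as a Fill,
    # storing one value plus the shape instead of a dense matrix.
    g = Zygote.gradient(sum, ones(3, 3))[1]
    @assert g isa Fill
    @assert g == ones(3, 3)   # compares equal to the dense equivalent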