remove some v0.13 deprecations #2493

Merged: 8 commits, Oct 12, 2024
6 changes: 3 additions & 3 deletions .buildkite/pipeline.yml
@@ -46,7 +46,7 @@ steps:
using Pkg
Pkg.resolve()'
commands: |
printf "[MLDataDevices]\ngpu_backend = \"Metal\"\n" > LocalPreferences.toml
printf "[Flux]\ngpu_backend = \"Metal\"\n" > LocalPreferences.toml

if: build.message !~ /\[skip tests\]/
timeout_in_minutes: 60
@@ -63,7 +63,7 @@
- label: "AMD GPU with Julia 1"
plugins:
- JuliaCI/julia#v1:
version: "1.10"
version: "1"
- JuliaCI/julia-test#v1:
- JuliaCI/julia-coverage#v1:
dirs:
@@ -74,7 +74,7 @@
rocm: "*"
rocmgpu: "*"
commands: |
printf "[MLDataDevices]\ngpu_backend = \"AMDGPU\"\n" > LocalPreferences.toml
printf "[Flux]\ngpu_backend = \"AMDGPU\"\n" > LocalPreferences.toml
timeout_in_minutes: 60
env:
JULIA_AMDGPU_CORE_MUST_LOAD: "1"
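Both printf commands above now write the `gpu_backend` key under a `[Flux]` section rather than `[MLDataDevices]`, because after this PR Flux reads the preference from its own UUID (see the `src/functor.jl` change below). A rough Julia equivalent of the shell command, as a sketch rather than part of the PR:

```julia
# Write the same preference from Julia instead of printf (sketch).
# The backend string is per CI step: "Metal" here, "AMDGPU" in the ROCm step.
using Preferences, Flux
set_preferences!(Flux, "gpu_backend" => "Metal"; force = true)
```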
24 changes: 15 additions & 9 deletions .github/workflows/ci.yml
@@ -9,16 +9,23 @@ on:
- master
tags: '*'

+# needed for julia-actions/cache to delete old caches
+permissions:
+actions: write
+contents: read

jobs:
test:
-name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }}
+name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }}
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
version:
-# - '1.9' # Uncomment when 1.10 is out. Replace this with the minimum Julia version that your package supports.
-- '1'
+- '1.10' # Replace this with the minimum Julia version that your package supports.
+- '1' # latest stable 1.x release
+# - 'pre' # latest stable prerelease
+- 'nightly' # latest nightly release
os: [ubuntu-latest]
arch: [x64]
include:
@@ -29,7 +36,7 @@ jobs:
version: '1'
arch: aarch64
steps:
-- uses: actions/checkout@v4.2.1
+- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v2
with:
version: ${{ matrix.version }}
@@ -47,17 +54,16 @@ jobs:
- uses: julia-actions/julia-buildpkg@v1
- name: "Run test without coverage report"
uses: julia-actions/julia-runtest@v1
-if: ${{ !contains(fromJson('["1", "1.9"]'), matrix.version) || matrix.os != 'ubuntu-latest' }}
+if: matrix.version != '1' || matrix.os != 'ubuntu-latest'
with:
coverage: false

- name: "Run test with coverage report"
uses: julia-actions/julia-runtest@v1
-if: contains(fromJson('["1", "1.9"]'), matrix.version) && matrix.os == 'ubuntu-latest'
+if: matrix.version == '1' && matrix.os == 'ubuntu-latest'
- uses: julia-actions/julia-processcoverage@v1
-if: contains(fromJson('["1", "1.9"]'), matrix.version) && matrix.os == 'ubuntu-latest'
+if: matrix.version == '1' && matrix.os == 'ubuntu-latest'
- uses: codecov/codecov-action@v4
-if: contains(fromJson('["1", "1.9"]'), matrix.version) && matrix.os == 'ubuntu-latest'
+if: matrix.version == '1' && matrix.os == 'ubuntu-latest'
with:
file: lcov.info

42 changes: 0 additions & 42 deletions .github/workflows/nightly-ci.yml

This file was deleted.

2 changes: 1 addition & 1 deletion Project.toml
@@ -67,4 +67,4 @@ SpecialFunctions = "2.1.2"
Statistics = "1"
Zygote = "0.6.67"
cuDNN = "1"
julia = "1.9"
julia = "1.10"
2 changes: 1 addition & 1 deletion ext/FluxAMDGPUExt/FluxAMDGPUExt.jl
@@ -6,7 +6,7 @@ import Flux
import Flux: FluxCPUAdaptor, FluxAMDGPUAdaptor, _amd, adapt_storage, fmap
import Flux: DenseConvDims, Conv, ConvTranspose, conv, conv_reshape_bias
import NNlib

+using MLDataDevices: MLDataDevices
using AMDGPU
using Adapt
using Random
1 change: 1 addition & 0 deletions ext/FluxCUDAExt/FluxCUDAExt.jl
@@ -10,6 +10,7 @@ using ChainRulesCore
using Random
using Adapt
import Adapt: adapt_storage
+using MLDataDevices: MLDataDevices


const USE_CUDA = Ref{Union{Nothing, Bool}}(nothing)
2 changes: 1 addition & 1 deletion ext/FluxCUDAExt/functor.jl
@@ -57,5 +57,5 @@ function _cuda(id::Union{Nothing, Int}, x)
end

function Flux._get_device(::Val{:CUDA}, id::Int)
-return MLDataUtils.gpu_device(id+1, force=true)
+return MLDataDevices.gpu_device(id+1, force=true)
end
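The old method referenced `MLDataUtils`, which this extension never imports; the fix dispatches to `MLDataDevices`, whose device ids are 1-based (hence `id+1`). A small usage sketch, assuming a working CUDA device and the existing two-argument `Flux.get_device` form that ends up in this method:

```julia
# Sketch: selecting CUDA device 0 through Flux, which now forwards to MLDataDevices.
using Flux, CUDA
dev = Flux.get_device("CUDA", 0)   # resolves to MLDataDevices.gpu_device(1, force=true)
x   = dev(rand(Float32, 3, 3))     # device objects are callable and move data
```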
4 changes: 0 additions & 4 deletions ext/FluxEnzymeExt/FluxEnzymeExt.jl
@@ -16,10 +16,6 @@ _applyloss(loss, model, d...) = loss(model, d...)

EnzymeRules.inactive(::typeof(Flux.Losses._check_sizes), args...) = true

using Flux: _old_to_new # from src/deprecations.jl
train!(loss, model::Duplicated, data, opt::Optimise.AbstractOptimiser; cb=nothing) =
train!(loss, model, data, _old_to_new(opt); cb)

function train!(loss, model::Duplicated, data, rule::Optimisers.AbstractRule; cb = nothing)
train!(loss, model, data, _rule_to_state(model, rule); cb)
end
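With the `Optimise.AbstractOptimiser` overload removed, this extension only accepts new-style Optimisers.jl rules. A hedged sketch of the surviving call pattern (the model, loss and data below are made up for illustration):

```julia
# Sketch: train! on an Enzyme.Duplicated model with an Optimisers rule,
# the only optimiser type this extension still handles after the PR.
using Flux, Enzyme, Optimisers
model = Dense(2 => 1)
dup   = Enzyme.Duplicated(model, Enzyme.make_zero(model))   # primal + shadow (gradient) copy
data  = [(rand(Float32, 2, 8), rand(Float32, 1, 8))]
Flux.train!((m, x, y) -> Flux.mse(m(x), y), dup, data, Optimisers.Adam(1f-3))
```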
2 changes: 1 addition & 1 deletion ext/FluxMetalExt/FluxMetalExt.jl
@@ -4,7 +4,7 @@ import Flux
import Flux: FluxCPUAdaptor, FluxMetalAdaptor, _metal, _isleaf, adapt_storage, fmap
import NNlib
using ChainRulesCore

+using MLDataDevices: MLDataDevices
using Metal
using Adapt
using Random
2 changes: 1 addition & 1 deletion ext/FluxMetalExt/functor.jl
@@ -35,6 +35,6 @@ end

function Flux._get_device(::Val{:Metal}, id::Int)
@assert id == 0 "Metal backend only supports one device at the moment"
-return MLDataDevices.gpu_device()
+return MLDataDevices.gpu_device(force=true)
end
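Passing `force=true` means asking for the Metal device now errors when no functional Metal GPU is present, instead of silently returning a CPU device. A minimal sketch of the behaviour this relies on:

```julia
# Sketch: request the GPU device explicitly; with force = true this throws
# if Metal.jl is not functional rather than falling back to the CPU.
using Flux, Metal
using MLDataDevices
dev = MLDataDevices.gpu_device(force = true)   # MetalDevice on Apple silicon
m   = Dense(3 => 2) |> dev                     # move layer parameters to the GPU
```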

5 changes: 3 additions & 2 deletions src/Flux.jl
@@ -18,13 +18,14 @@ using Zygote: Params, @adjoint, gradient, pullback
using Zygote.ForwardDiff: value
export gradient

-@reexport using MLDataDevices: MLDataDevices, gpu_backend!, supported_gpu_backends, reset_gpu_device!,
+@reexport using MLDataDevices: MLDataDevices, supported_gpu_backends, reset_gpu_device!,
default_device_rng,
gpu_device, cpu_device, xla_device,
CPUDevice,
CUDADevice, AMDGPUDevice, MetalDevice, oneAPIDevice,
XLADevice,
# get_device, # we define get_device here for retrocompatibility
+# gpu_backend!, # have to define here due to https://github.com/JuliaPackaging/Preferences.jl/issues/39
get_device_type,
DeviceIterator

@@ -104,7 +105,7 @@ include("losses/Losses.jl")
using .Losses

include("devices.jl")
-export get_device
+export get_device, gpu_backend!

# Distributed Training
include("distributed/backend.jl")
95 changes: 0 additions & 95 deletions src/deprecations.jl
@@ -1,47 +1,6 @@

# v0.13 deprecations

function Broadcast.broadcasted(f::Recur, args...)
# This had an explicit @adjoint rule, calling Zygote.∇map(__context__, f, args...), until v0.12
Base.depwarn("""Broadcasting is not safe to use with RNNs, as it does not guarantee an iteration order.
Re-writing this as a comprehension would be better.""", :broadcasted)
map(f, args...) # map isn't really safe either, but
end

@deprecate frequencies(xs) group_counts(xs)

struct Zeros
function Zeros()
Base.depwarn("Flux.Zeros is no more, has ceased to be, is bereft of life, is an ex-boondoggle... please use bias=false instead", :Zeros)
false
end
end
Zeros(args...) = Zeros() # was used both Dense(10, 2, initb = Zeros) and Dense(rand(2,10), Zeros())

function Optimise.update!(x::AbstractArray, x̄)
Base.depwarn("`Flux.Optimise.update!(x, x̄)` was not used internally and has been removed. Please write `x .-= x̄` instead.", :update!)
x .-= x̄
end

function Diagonal(size::Integer...; kw...)
Base.depwarn("Flux.Diagonal is now Flux.Scale, and also allows an activation function.", :Diagonal)
Scale(size...; kw...)
end
function Diagonal(size::Tuple; kw...)
Base.depwarn("Flux.Diagonal is now Flux.Scale, and also allows an activation function.", :Diagonal)
Scale(size...; kw...)
end

# Deprecate this eventually once saving models w/o structure is no more
function loadparams!(m, xs)
Base.depwarn("loadparams! will be deprecated eventually. Use loadmodel! instead.", :loadparams!)
for (p, x) in zip(params(m), xs)
size(p) == size(x) ||
error("Expected param size $(size(p)), got $(size(x))")
copyto!(p, x)
end
end

# Channel notation: Changed to match Conv, but very softly deprecated!
# Perhaps change to @deprecate for v0.15, but there is no plan to remove these.
Dense(in::Integer, out::Integer, σ = identity; kw...) =
@@ -56,32 +15,6 @@ LSTMCell(in::Integer, out::Integer; kw...) = LSTMCell(in => out; kw...)
GRUCell(in::Integer, out::Integer; kw...) = GRUCell(in => out; kw...)
GRUv3Cell(in::Integer, out::Integer; kw...) = GRUv3Cell(in => out; kw...)

# Optimisers with old naming convention
Base.@deprecate_binding ADAM Adam
Base.@deprecate_binding NADAM NAdam
Base.@deprecate_binding ADAMW AdamW
Base.@deprecate_binding RADAM RAdam
Base.@deprecate_binding OADAM OAdam
Base.@deprecate_binding ADAGrad AdaGrad
Base.@deprecate_binding ADADelta AdaDelta

# Remove sub-module Data, while making sure Flux.Data.DataLoader keeps working
Base.@deprecate_binding Data Flux false "Sub-module Flux.Data has been removed. The only thing it contained may be accessed as Flux.DataLoader"

@deprecate paramtype(T,m) _paramtype(T,m) false # internal method, renamed to make this clear

@deprecate rng_from_array() Random.default_rng()

function istraining()
Base.depwarn("Flux.istraining() is deprecated, use NNlib.within_gradient(x) instead", :istraining)
false
end
ChainRulesCore.rrule(::typeof(istraining)) = true, _ -> (NoTangent(),)

function _isactive(m)
Base.depwarn("_isactive(m) is deprecated, use _isactive(m,x)", :_isactive, force=true)
_isactive(m, 1:0)
end

#=
# Valid method in Optimise, old implicit style, is:
@@ -110,7 +43,6 @@ train!(loss, ps::Params, data, opt::Optimisers.AbstractRule; cb=nothing) = error
train!(loss, model, data, opt::Optimise.AbstractOptimiser; cb=nothing) =
train!(loss, model, data, _old_to_new(opt); cb)


# Next, to use the new `setup` with the still-exported old-style `Adam` etc:
import .Train: setup
setup(rule::Optimise.AbstractOptimiser, model) = setup(_old_to_new(rule), model)
@@ -179,33 +111,6 @@ function update!(opt::Optimise.AbstractOptimiser, ::Params, grads::Union{Tuple,
""")
end

"""
trainmode!(m, active)

!!! warning
This two-argument method is deprecated.

Possible values of `active` are:
- `true` for training, or
- `false` for testing, same as [`testmode!`](@ref)`(m)`
- `:auto` or `nothing` for Flux to detect training automatically.
"""
function trainmode!(m, active::Bool)
Base.depwarn("trainmode!(m, active::Bool) is deprecated", :trainmode)
testmode!(m, !active)
end

# Greek-letter keywords deprecated in Flux 0.13
# Arguments (old => new, :function, "β" => "beta")
function _greek_ascii_depwarn(βbeta::Pair, func = :loss, names = "" => "")
Base.depwarn(LazyString("function ", func, " no longer accepts greek-letter keyword ", names.first, """
please use ascii """, names.second, " instead"), func)
βbeta.first
end
_greek_ascii_depwarn(βbeta::Pair{Nothing}, _...) = βbeta.second

ChainRulesCore.@non_differentiable _greek_ascii_depwarn(::Any...)


# v0.14 deprecations
@deprecate default_rng_value() Random.default_rng()
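For downstream code that still relied on the removed v0.13 deprecations, the replacements the old warnings pointed to look roughly like this (a before/after sketch, not part of the diff; `loadmodel!`'s second argument is whatever model holds the saved parameters):

```julia
# Old (removed in this PR)            ->  Current replacement
# Dense(10, 2, initb = Flux.Zeros)    ->  bias = false
# Flux.Diagonal(3)                    ->  Flux.Scale(3)
# ADAM(), ADAGrad(), NADAM(), ...     ->  Adam(), AdaGrad(), NAdam(), ...
# Flux.istraining()                   ->  NNlib.within_gradient(x)
# Flux.loadparams!(m, ps)             ->  Flux.loadmodel!(m, saved_model)
using Flux
layer = Dense(10 => 2, relu; bias = false)
scale = Flux.Scale(3)
state = Flux.setup(Adam(1f-3), layer)
```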
14 changes: 11 additions & 3 deletions src/functor.jl
@@ -189,11 +189,19 @@ _isleaf(::Union{Transpose, Adjoint, PermutedDimsArray}) = false

_isleaf(::AbstractRNG) = true

+# Remove when
+# https://github.com/JuliaPackaging/Preferences.jl/issues/39
+# is resolved
+function gpu_backend!(backend::String)
+@set_preferences!("gpu_backend" => backend)
+MLDataDevices.gpu_backend!(backend)
+end

# the order below is important
const GPU_BACKENDS = ("CUDA", "AMDGPU", "Metal", "CPU")
const GPU_BACKEND_ORDER = Dict(collect(zip(GPU_BACKENDS, 1:length(GPU_BACKENDS))))
-const GPU_BACKEND = load_preference(MLDataDevices, "gpu_backend", "CUDA")

+# const GPU_BACKEND = load_preference(MLDataDevices, "gpu_backend", "CUDA")
+# https://github.com/JuliaPackaging/Preferences.jl/issues/39
+const GPU_BACKEND = @load_preference("gpu_backend", "CUDA")

"""
gpu(m)
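Because of JuliaPackaging/Preferences.jl#39, the `gpu_backend` preference cannot simply be re-exported from MLDataDevices, so Flux wraps `gpu_backend!` (writing the key for both packages) and reads it back with `@load_preference` under its own UUID. A usage sketch; like any compile-time preference, the new value only takes effect after restarting Julia:

```julia
# Sketch: switch the default backend, then restart Julia so that the
# GPU_BACKEND constant above is re-read from LocalPreferences.toml.
using Flux
Flux.gpu_backend!("AMDGPU")    # writes gpu_backend for Flux and MLDataDevices
# ... restart Julia ...
using Flux
Flux.GPU_BACKEND               # == "AMDGPU"
```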