This repository has been archived by the owner on Nov 4, 2024. It is now read-only.

refactor!: rename round 2 to MLDataDevices #62

Merged · 3 commits · Jul 24, 2024
Changes from 1 commit
26 changes: 13 additions & 13 deletions Project.toml
@@ -1,4 +1,4 @@
name = "DeviceUtils"
name = "MLDataDevices"
uuid = "7e8f7934-dd98-4c1a-8fe8-92b47a384d40"
authors = ["Avik Pal <[email protected]> and contributors"]
version = "1.0.0"
@@ -26,18 +26,18 @@ cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"
oneAPI = "8f75cd03-7ff8-4ecb-9b8f-daf728133b1b"

[extensions]
DeviceUtilsAMDGPUExt = "AMDGPU"
DeviceUtilsCUDAExt = "CUDA"
DeviceUtilsFillArraysExt = "FillArrays"
DeviceUtilsGPUArraysExt = "GPUArrays"
DeviceUtilsMetalExt = ["GPUArrays", "Metal"]
DeviceUtilsRecursiveArrayToolsExt = "RecursiveArrayTools"
DeviceUtilsReverseDiffExt = "ReverseDiff"
DeviceUtilsSparseArraysExt = "SparseArrays"
DeviceUtilsTrackerExt = "Tracker"
DeviceUtilsZygoteExt = "Zygote"
DeviceUtilscuDNNExt = ["CUDA", "cuDNN"]
DeviceUtilsoneAPIExt = ["GPUArrays", "oneAPI"]
MLDataDevicesAMDGPUExt = "AMDGPU"
MLDataDevicesCUDAExt = "CUDA"
MLDataDevicesFillArraysExt = "FillArrays"
MLDataDevicesGPUArraysExt = "GPUArrays"
MLDataDevicesMetalExt = ["GPUArrays", "Metal"]
MLDataDevicesRecursiveArrayToolsExt = "RecursiveArrayTools"
MLDataDevicesReverseDiffExt = "ReverseDiff"
MLDataDevicesSparseArraysExt = "SparseArrays"
MLDataDevicesTrackerExt = "Tracker"
MLDataDevicesZygoteExt = "Zygote"
MLDataDevicescuDNNExt = ["CUDA", "cuDNN"]
MLDataDevicesoneAPIExt = ["GPUArrays", "oneAPI"]

[compat]
AMDGPU = "0.9.6"
12 changes: 6 additions & 6 deletions README.md
@@ -1,18 +1,18 @@
-# DeviceUtils
+# MLDataDevices

[![Join the chat at https://julialang.zulipchat.com #machine-learning](https://img.shields.io/static/v1?label=Zulip&message=chat&color=9558b2&labelColor=389826)](https://julialang.zulipchat.com/#narrow/stream/machine-learning)
[![Latest Docs](https://img.shields.io/badge/docs-latest-blue.svg)](https://lux.csail.mit.edu/dev/api/Accelerator_Support/LuxDeviceUtils)
[![Stable Docs](https://img.shields.io/badge/docs-stable-blue.svg)](https://lux.csail.mit.edu/stable/api/Accelerator_Support/LuxDeviceUtils)

-[![CI](https://github.com/LuxDL/DeviceUtils.jl/actions/workflows/CI.yml/badge.svg)](https://github.com/LuxDL/DeviceUtils.jl/actions/workflows/CI.yml)
-[![Buildkite](https://badge.buildkite.com/b098d6387b2c69bd0ab684293ff66332047b219e1b8f9bb486.svg?branch=main)](https://buildkite.com/julialang/DeviceUtils-dot-jl)
-[![codecov](https://codecov.io/gh/LuxDL/DeviceUtils.jl/branch/main/graph/badge.svg?token=1ZY0A2NPEM)](https://codecov.io/gh/LuxDL/DeviceUtils.jl)
+[![CI](https://github.com/LuxDL/MLDataDevices.jl/actions/workflows/CI.yml/badge.svg)](https://github.com/LuxDL/MLDataDevices.jl/actions/workflows/CI.yml)
+[![Buildkite](https://badge.buildkite.com/b098d6387b2c69bd0ab684293ff66332047b219e1b8f9bb486.svg?branch=main)](https://buildkite.com/julialang/MLDataDevices-dot-jl)
+[![codecov](https://codecov.io/gh/LuxDL/MLDataDevices.jl/branch/main/graph/badge.svg?token=1ZY0A2NPEM)](https://codecov.io/gh/LuxDL/MLDataDevices.jl)
[![Aqua QA](https://raw.githubusercontent.com/JuliaTesting/Aqua.jl/master/badge.svg)](https://github.com/JuliaTesting/Aqua.jl)

[![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac)
[![SciML Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle)

-`DeviceUtils.jl` is a lightweight package defining rules for transferring data across
+`MLDataDevices.jl` is a lightweight package defining rules for transferring data across
devices. It is used in deep learning frameworks such as [Lux.jl](https://lux.csail.mit.edu/).

Currently we provide support for the following backends:
@@ -24,6 +24,6 @@ Currently we provide support for the following backends:

## Updating to v1.0

-* Package was renamed from `LuxDeviceUtils.jl` to `DeviceUtils.jl`.
+* Package was renamed from `LuxDeviceUtils.jl` to `MLDataDevices.jl`.
* `Lux(***)Device` has been renamed to `(***)Device`.
* `Lux(***)Adaptor` objects have been removed. Use `(***)Device` objects instead.
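For illustration, a minimal migration sketch implied by the notes above (hypothetical user code, not part of this diff; `gpu_device`/`cpu_device` are assumed to carry over unchanged from `LuxDeviceUtils.jl`):

```julia
# Hypothetical before/after for the v1.0 rename; illustrative only.
using MLDataDevices            # was: using LuxDeviceUtils

dev  = gpu_device()            # assumed-unchanged entry point; falls back to CPU
cdev = cpu_device()

x = rand(Float32, 3, 4)
x_dev = dev(x)                 # devices are callable; replaces the old Lux(***)Adaptor usage
x_cpu = cdev(x_dev)

# Type names drop the `Lux` prefix:
#   LuxCUDADevice   -> CUDADevice
#   LuxAMDGPUDevice -> AMDGPUDevice
```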
27 changes: 0 additions & 27 deletions ext/DeviceUtilsMetalExt.jl

This file was deleted.

17 changes: 0 additions & 17 deletions ext/DeviceUtilsReverseDiffExt.jl

This file was deleted.

32 changes: 16 additions & 16 deletions ext/DeviceUtilsAMDGPUExt.jl → ext/MLDataDevicesAMDGPUExt.jl
@@ -2,7 +2,7 @@ module DeviceUtilsAMDGPUExt

using Adapt: Adapt
using AMDGPU: AMDGPU
-using DeviceUtils: DeviceUtils, AMDGPUDevice, CPUDevice, reset_gpu_device!
+using MLDataDevices: MLDataDevices, AMDGPUDevice, CPUDevice, reset_gpu_device!
using Random: Random

__init__() = reset_gpu_device!()
@@ -21,16 +21,16 @@ function _check_use_amdgpu!()
return
end

-DeviceUtils.loaded(::Union{AMDGPUDevice, <:Type{AMDGPUDevice}}) = true
-function DeviceUtils.functional(::Union{AMDGPUDevice, <:Type{AMDGPUDevice}})::Bool
+MLDataDevices.loaded(::Union{AMDGPUDevice, <:Type{AMDGPUDevice}}) = true
+function MLDataDevices.functional(::Union{AMDGPUDevice, <:Type{AMDGPUDevice}})::Bool
_check_use_amdgpu!()
return USE_AMD_GPU[]
end

-function DeviceUtils._with_device(::Type{AMDGPUDevice}, ::Nothing)
+function MLDataDevices._with_device(::Type{AMDGPUDevice}, ::Nothing)
return AMDGPUDevice(nothing)
end
-function DeviceUtils._with_device(::Type{AMDGPUDevice}, id::Integer)
+function MLDataDevices._with_device(::Type{AMDGPUDevice}, id::Integer)
id > length(AMDGPU.devices()) &&
throw(ArgumentError("id = $id > length(AMDGPU.devices()) = $(length(AMDGPU.devices()))"))
old_dev = AMDGPU.device()
@@ -40,38 +40,38 @@ function DeviceUtils._with_device(::Type{AMDGPUDevice}, id::Integer)
return device
end

-DeviceUtils._get_device_id(dev::AMDGPUDevice) = AMDGPU.device_id(dev.device)
+MLDataDevices._get_device_id(dev::AMDGPUDevice) = AMDGPU.device_id(dev.device)

# Default RNG
-DeviceUtils.default_device_rng(::AMDGPUDevice) = AMDGPU.rocrand_rng()
+MLDataDevices.default_device_rng(::AMDGPUDevice) = AMDGPU.rocrand_rng()

# Query Device from Array
-function DeviceUtils._get_device(x::AMDGPU.AnyROCArray)
+function MLDataDevices._get_device(x::AMDGPU.AnyROCArray)
parent_x = parent(x)
parent_x === x && return AMDGPUDevice(AMDGPU.device(x))
-return DeviceUtils._get_device(parent_x)
+return MLDataDevices._get_device(parent_x)
end

-DeviceUtils._get_device_type(::AMDGPU.AnyROCArray) = AMDGPUDevice
+MLDataDevices._get_device_type(::AMDGPU.AnyROCArray) = AMDGPUDevice

# Set Device
-function DeviceUtils.set_device!(::Type{AMDGPUDevice}, dev::AMDGPU.HIPDevice)
+function MLDataDevices.set_device!(::Type{AMDGPUDevice}, dev::AMDGPU.HIPDevice)
return AMDGPU.device!(dev)
end
-function DeviceUtils.set_device!(::Type{AMDGPUDevice}, id::Integer)
-return DeviceUtils.set_device!(AMDGPUDevice, AMDGPU.devices()[id])
+function MLDataDevices.set_device!(::Type{AMDGPUDevice}, id::Integer)
+return MLDataDevices.set_device!(AMDGPUDevice, AMDGPU.devices()[id])
end
-function DeviceUtils.set_device!(::Type{AMDGPUDevice}, ::Nothing, rank::Integer)
+function MLDataDevices.set_device!(::Type{AMDGPUDevice}, ::Nothing, rank::Integer)
id = mod1(rank + 1, length(AMDGPU.devices()))
-return DeviceUtils.set_device!(AMDGPUDevice, id)
+return MLDataDevices.set_device!(AMDGPUDevice, id)
end

# Device Transfer
## To GPU
Adapt.adapt_storage(::AMDGPUDevice{Nothing}, x::AbstractArray) = AMDGPU.roc(x)
function Adapt.adapt_storage(to::AMDGPUDevice, x::AbstractArray)
old_dev = AMDGPU.device() # remember the current device
-dev = DeviceUtils.get_device(x)
+dev = MLDataDevices.get_device(x)
if !(dev isa AMDGPUDevice)
AMDGPU.device!(to.device)
x_new = AMDGPU.roc(x)
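As a quick check of the round-robin rank-to-device mapping used by `set_device!(AMDGPUDevice, nothing, rank)` above, a standalone sketch (the device count is made up for illustration):

```julia
# mod1(rank + 1, ndev) maps 0-based process ranks onto 1-based device ids,
# wrapping around when there are more ranks than devices.
ndev = 4                                      # pretend four GPUs are visible
ids  = [mod1(rank + 1, ndev) for rank in 0:7]
@assert ids == [1, 2, 3, 4, 1, 2, 3, 4]
```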
32 changes: 16 additions & 16 deletions ext/DeviceUtilsCUDAExt.jl → ext/MLDataDevicesCUDAExt.jl
@@ -3,10 +3,10 @@ module DeviceUtilsCUDAExt
using Adapt: Adapt
using CUDA: CUDA
using CUDA.CUSPARSE: AbstractCuSparseMatrix, AbstractCuSparseVector
-using DeviceUtils: DeviceUtils, CUDADevice, CPUDevice
+using MLDataDevices: MLDataDevices, CUDADevice, CPUDevice
using Random: Random

-function DeviceUtils._with_device(::Type{CUDADevice}, id::Integer)
+function MLDataDevices._with_device(::Type{CUDADevice}, id::Integer)
id > length(CUDA.devices()) &&
throw(ArgumentError("id = $id > length(CUDA.devices()) = $(length(CUDA.devices()))"))
old_dev = CUDA.device()
@@ -16,47 +16,47 @@ function DeviceUtils._with_device(::Type{CUDADevice}, id::Integer)
return device
end

-function DeviceUtils._with_device(::Type{CUDADevice}, ::Nothing)
+function MLDataDevices._with_device(::Type{CUDADevice}, ::Nothing)
return CUDADevice(nothing)
end

-DeviceUtils._get_device_id(dev::CUDADevice) = CUDA.deviceid(dev.device) + 1
+MLDataDevices._get_device_id(dev::CUDADevice) = CUDA.deviceid(dev.device) + 1

# Default RNG
-DeviceUtils.default_device_rng(::CUDADevice) = CUDA.default_rng()
+MLDataDevices.default_device_rng(::CUDADevice) = CUDA.default_rng()

# Query Device from Array
-function DeviceUtils._get_device(x::CUDA.AnyCuArray)
+function MLDataDevices._get_device(x::CUDA.AnyCuArray)
parent_x = parent(x)
parent_x === x && return CUDADevice(CUDA.device(x))
-return DeviceUtils.get_device(parent_x)
+return MLDataDevices.get_device(parent_x)
end
-function DeviceUtils._get_device(x::CUDA.CUSPARSE.AbstractCuSparseArray)
+function MLDataDevices._get_device(x::CUDA.CUSPARSE.AbstractCuSparseArray)
return CUDADevice(CUDA.device(x.nzVal))
end

-function DeviceUtils._get_device_type(::Union{
+function MLDataDevices._get_device_type(::Union{
<:CUDA.AnyCuArray, <:CUDA.CUSPARSE.AbstractCuSparseArray})
return CUDADevice
end

# Set Device
-function DeviceUtils.set_device!(::Type{CUDADevice}, dev::CUDA.CuDevice)
+function MLDataDevices.set_device!(::Type{CUDADevice}, dev::CUDA.CuDevice)
return CUDA.device!(dev)
end
-function DeviceUtils.set_device!(::Type{CUDADevice}, id::Integer)
-return DeviceUtils.set_device!(CUDADevice, collect(CUDA.devices())[id])
+function MLDataDevices.set_device!(::Type{CUDADevice}, id::Integer)
+return MLDataDevices.set_device!(CUDADevice, collect(CUDA.devices())[id])
end
-function DeviceUtils.set_device!(::Type{CUDADevice}, ::Nothing, rank::Integer)
+function MLDataDevices.set_device!(::Type{CUDADevice}, ::Nothing, rank::Integer)
id = mod1(rank + 1, length(CUDA.devices()))
-return DeviceUtils.set_device!(CUDADevice, id)
+return MLDataDevices.set_device!(CUDADevice, id)
end

# Device Transfer
Adapt.adapt_storage(::CUDADevice{Nothing}, x::AbstractArray) = CUDA.cu(x)
function Adapt.adapt_storage(to::CUDADevice, x::AbstractArray)
old_dev = CUDA.device() # remember the current device
-dev = DeviceUtils.get_device(x)
+dev = MLDataDevices.get_device(x)
if !(dev isa CUDADevice)
CUDA.device!(to.device)
x_new = CUDA.cu(x)
@@ -84,7 +84,7 @@ Adapt.adapt_storage(::CPUDevice, rng::CUDA.RNG) = Random.default_rng()
end
else
@warn "CUDA.CUSPARSE seems to have removed SparseArrays as a dependency. Please open \
-           an issue in DeviceUtils.jl repository."
+           an issue in MLDataDevices.jl repository."
end

end
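For context, a hedged sketch of how the rules in this extension are exercised from user code (assumes a functional CUDA setup, that `get_device` is the public wrapper around `_get_device`, and that device objects are callable):

```julia
using CUDA, MLDataDevices

x   = CUDA.cu(rand(Float32, 8))
dev = MLDataDevices.get_device(x)      # CUDADevice wrapping the owning CuDevice
@assert dev isa CUDADevice
y = dev(rand(Float32, 8))              # adapt_storage moves new data to that device
v = @view x[2:5]
MLDataDevices.get_device(v)            # views resolve to their parent's device
```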
ext/DeviceUtilsFillArraysExt.jl → ext/MLDataDevicesFillArraysExt.jl
@@ -2,7 +2,7 @@ module DeviceUtilsFillArraysExt

using Adapt: Adapt
using FillArrays: FillArrays, AbstractFill
-using DeviceUtils: DeviceUtils, CPUDevice, AbstractDevice
+using MLDataDevices: MLDataDevices, CPUDevice, AbstractDevice

Adapt.adapt_structure(::CPUDevice, x::AbstractFill) = x
Adapt.adapt_structure(to::AbstractDevice, x::AbstractFill) = Adapt.adapt(to, collect(x))
ext/DeviceUtilsGPUArraysExt.jl → ext/MLDataDevicesGPUArraysExt.jl
@@ -2,7 +2,7 @@ module DeviceUtilsGPUArraysExt

using Adapt: Adapt
using GPUArrays: GPUArrays
-using DeviceUtils: CPUDevice
+using MLDataDevices: CPUDevice
using Random: Random

Adapt.adapt_storage(::CPUDevice, rng::GPUArrays.RNG) = Random.default_rng()
27 changes: 27 additions & 0 deletions ext/MLDataDevicesMetalExt.jl
@@ -0,0 +1,27 @@
module DeviceUtilsMetalExt

using Adapt: Adapt
using GPUArrays: GPUArrays
using MLDataDevices: MLDataDevices, MetalDevice, reset_gpu_device!
using Metal: Metal, MtlArray

__init__() = reset_gpu_device!()

MLDataDevices.loaded(::Union{MetalDevice, Type{<:MetalDevice}}) = true
function MLDataDevices.functional(::Union{MetalDevice, Type{<:MetalDevice}})
return Metal.functional()
end

# Default RNG
MLDataDevices.default_device_rng(::MetalDevice) = GPUArrays.default_rng(MtlArray)

# Query Device from Array
MLDataDevices._get_device(::MtlArray) = MetalDevice()

MLDataDevices._get_device_type(::MtlArray) = MetalDevice

# Device Transfer
## To GPU
Adapt.adapt_storage(::MetalDevice, x::AbstractArray) = Metal.mtl(x)

end
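A small usage sketch for the new extension above (assumes macOS with a working Metal.jl, and that device objects are callable via the Adapt rules defined here):

```julia
using Metal, MLDataDevices

if MLDataDevices.functional(MetalDevice)
    dev   = MetalDevice()
    x_mtl = dev(rand(Float32, 4, 4))              # Adapt.adapt_storage -> Metal.mtl
    rng   = MLDataDevices.default_device_rng(dev) # GPUArrays.default_rng(MtlArray)
end
```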
ext/DeviceUtilsRecursiveArrayToolsExt.jl → ext/MLDataDevicesRecursiveArrayToolsExt.jl
@@ -1,7 +1,7 @@
module DeviceUtilsRecursiveArrayToolsExt

using Adapt: Adapt, adapt
-using DeviceUtils: DeviceUtils, AbstractDevice
+using MLDataDevices: MLDataDevices, AbstractDevice
using RecursiveArrayTools: VectorOfArray, DiffEqArray

# We want to preserve the structure
@@ -15,9 +15,9 @@ function Adapt.adapt_structure(to::AbstractDevice, x::DiffEqArray)
end

for op in (:_get_device, :_get_device_type)
-@eval function DeviceUtils.$op(x::Union{VectorOfArray, DiffEqArray})
+@eval function MLDataDevices.$op(x::Union{VectorOfArray, DiffEqArray})
length(x.u) == 0 && return $(op == :_get_device ? nothing : Nothing)
-return mapreduce(DeviceUtils.$op, DeviceUtils.__combine_devices, x.u)
+return mapreduce(MLDataDevices.$op, MLDataDevices.__combine_devices, x.u)
end
end

17 changes: 17 additions & 0 deletions ext/MLDataDevicesReverseDiffExt.jl
@@ -0,0 +1,17 @@
module DeviceUtilsReverseDiffExt

using MLDataDevices: MLDataDevices
using ReverseDiff: ReverseDiff

for op in (:_get_device, :_get_device_type)
@eval begin
function MLDataDevices.$op(x::ReverseDiff.TrackedArray)
return MLDataDevices.$op(ReverseDiff.value(x))
end
function MLDataDevices.$op(x::AbstractArray{<:ReverseDiff.TrackedReal})
return MLDataDevices.$op(ReverseDiff.value.(x))
end
end
end

end
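A brief sketch of what these rules buy (hypothetical usage; `get_device_type` is assumed to be the public wrapper around `_get_device_type`):

```julia
using ReverseDiff, MLDataDevices

x  = rand(Float32, 3)
xt = ReverseDiff.track(x)   # TrackedArray wrapping x
# Device queries see through the tracking and report the wrapped array's device:
@assert MLDataDevices.get_device_type(xt) == MLDataDevices.get_device_type(x)
```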
ext/DeviceUtilsSparseArraysExt.jl → ext/MLDataDevicesSparseArraysExt.jl
@@ -1,7 +1,7 @@
module DeviceUtilsSparseArraysExt

using Adapt: Adapt
-using DeviceUtils: CPUDevice
+using MLDataDevices: CPUDevice
using SparseArrays: AbstractSparseArray

Adapt.adapt_storage(::CPUDevice, x::AbstractSparseArray) = x
10 changes: 5 additions & 5 deletions ext/DeviceUtilsTrackerExt.jl → ext/MLDataDevicesTrackerExt.jl
@@ -1,19 +1,19 @@
module DeviceUtilsTrackerExt

using Adapt: Adapt
-using DeviceUtils: DeviceUtils, AMDGPUDevice, CUDADevice, MetalDevice, oneAPIDevice
+using MLDataDevices: MLDataDevices, AMDGPUDevice, CUDADevice, MetalDevice, oneAPIDevice
using Tracker: Tracker

for op in (:_get_device, :_get_device_type)
@eval begin
-DeviceUtils.$op(x::Tracker.TrackedArray) = DeviceUtils.$op(Tracker.data(x))
-function DeviceUtils.$op(x::AbstractArray{<:Tracker.TrackedReal})
-return DeviceUtils.$op(Tracker.data.(x))
+MLDataDevices.$op(x::Tracker.TrackedArray) = MLDataDevices.$op(Tracker.data(x))
+function MLDataDevices.$op(x::AbstractArray{<:Tracker.TrackedReal})
+return MLDataDevices.$op(Tracker.data.(x))
end
end
end

-DeviceUtils.__special_aos(::AbstractArray{<:Tracker.TrackedReal}) = true
+MLDataDevices.__special_aos(::AbstractArray{<:Tracker.TrackedReal}) = true

for T in (AMDGPUDevice, AMDGPUDevice{Nothing}, CUDADevice,
CUDADevice{Nothing}, MetalDevice, oneAPIDevice)