Commit 8b15c6e: Delete most source code, rely on NDTensorsCUDAExt

mtfishman committed May 7, 2024 (1 parent: cf88d62)
Showing 14 changed files with 35 additions and 1,761 deletions.
Project.toml: 28 changes (4 additions & 24 deletions)
```diff
@@ -1,33 +1,13 @@
 name = "ITensorGPU"
 uuid = "d89171c1-af8f-46b3-badf-d2a472317c15"
 authors = ["Katharine Hyatt", "Matthew Fishman <[email protected]>"]
-version = "0.1.7"
+version = "0.2.0"
 
 [deps]
-Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
 CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
-Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa"
-Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
 ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
-LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
-NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
-Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
-SimpleTraits = "699a6c99-e7fa-54fc-8d76-47d257e15c1d"
-StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
-Strided = "5e0ebb24-38b0-5f93-81fe-25c709ecae67"
-TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
-cuTENSOR = "011b41b2-24ef-40a8-b3eb-fa098493e9e1"
 
 [compat]
-Adapt = "3.5, 4"
-CUDA = "4.0"
-Combinatorics = "1.0.2"
-Functors = "0.2, 0.3, 0.4"
-ITensors = "= 0.3.37"
-NDTensors = "0.1.50"
-SimpleTraits = "0.9.4"
-StaticArrays = "1.2.13"
-Strided = "1.1.2, 2"
-TimerOutputs = "0.5.13"
-cuTENSOR = "1.1.0"
-julia = "1.6 - 1.9"
+CUDA = "4, 5"
+ITensors = "0.3, 0.4, 0.5"
+julia = "1.6"
```
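After this change, CUDA.jl and ITensors.jl are the only direct dependencies. As a rough sketch (not part of this commit), installing the slimmed-down package and checking that the remaining CUDA dependency is usable could look like the following; `CUDA.functional()` and `CUDA.versioninfo()` are standard CUDA.jl calls, and a working NVIDIA driver is assumed:

```julia
# Hedged sketch: install the slimmed-down package and verify that the
# remaining CUDA.jl dependency can actually reach a GPU.
using Pkg
Pkg.add("ITensorGPU")    # now pulls in only CUDA.jl and ITensors.jl as direct deps

using CUDA
CUDA.functional()        # true on a machine with a usable NVIDIA driver/runtime
CUDA.versioninfo()       # prints the detected driver, runtime, and artifact versions
```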
README.md: 53 changes (3 additions & 50 deletions)
```diff
@@ -1,50 +1,3 @@
-# ITensorGPU: Intelligent Tensors with GPU acceleration
-
-[![codecov](https://codecov.io/gh/ITensor/ITensorGPU.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/ITensor/ITensorGPU.jl)
-
-[![gitlab-ci](https://gitlab.com/JuliaGPU/ITensorGPU-jl/badges/master/pipeline.svg)](https://gitlab.com/JuliaGPU/ITensorGPU-jl/commits/master)
-
-This package extends the functionality of [ITensors.jl](https://github.com/ITensor/ITensors.jl) to make use of CUDA-enabled GPUs to accelerate tensor contractions and factorizations. It sits on top of the wonderful [CUDA.jl](https://github.com/JuliaGPU/CUDA.jl) package and uses NVIDIA's [cuTENSOR](https://developer.nvidia.com/cutensor) library for high-performance tensor operations.
-
-## Installing ITensorGPU.jl
-
-Dependencies:
-- [Julia 1.3 or later](https://julialang.org/downloads/)
-- [CUDA 10.1 or later](https://developer.nvidia.com/cuda-downloads) -- Currently only NVIDIA GPUs are supported. NVIDIA drivers are required so that Julia can make use of the NVIDIA GPU on your system.
-- [cuTENSOR v1.0.0 or later](https://developer.nvidia.com/cutensor) -- A specialized library for perfoming permutation-free tensor contractions on the GPU. `libcutensor.so` needs to be in your `LD_LIBRARY_PATH` so that `CUDA.jl` will be able to find it.
-- [ITensors.jl](https://itensor.github.io/ITensors.jl/stable/#Installation-1)
-
-To properly install CUDA with Julia, it may be helpful to first follow the [CUDA.jl installation instructions](https://juliagpu.github.io/CUDA.jl/stable/installation/overview/) and test that you have that installed properly and that it is able to use `cuTENSOR`. You can run the commands:
-```julia
-julia> using CUDA.CUTENSOR
-
-julia> CUTENSOR.has_cutensor()
-true
-
-julia> CUTENSOR.version()
-v"1.2.1"
-```
-to check that `CUDA.jl` can see the version of `cuTENSOR` you have installed.
-
-Once you have all of the dependencies installed, you can then go ahead and install `ITensorGPU.jl` with the following command:
-```
-julia> ]
-pkg> add ITensorGPU
-```
-
-To check if this has all worked, you can run the package tests using:
-```julia
-julia> ]
-
-pkg> test ITensorGPU
-```
-
-## Examples
-
-Take a look at the `examples/` directory for examples of running ITensor calculations on the GPU.
-
-For an application of `ITensorGPU.jl` to more sophisticated tensor network calculations, take a look at [PEPS.jl](https://github.com/ITensor/PEPS.jl).
-
-For some background on the development and design of this package, you can take a look at [this blog post](https://kshyatt.github.io/post/itensorsgpu/) by Katie Hyatt, original author of the `ITensorGPU.jl` package.
-
+| :warning: WARNING |
+|:---------------------------|
+| The ITensorGPU.jl package is deprecated and only provided for backwards compatibility. For an alternative, see the ITensors.jl documentation about [running ITensor on GPUs](https://itensor.github.io/ITensors.jl/dev/RunningOnGPUs.html). |
```
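The new README points users at the GPU support built into ITensors.jl/NDTensors.jl rather than this package. A minimal sketch of that recommended workflow, assuming a CUDA-capable machine and a recent ITensors.jl whose CUDA package extension activates when CUDA.jl is loaded, looks roughly like this (the `cpu`/`cu` names are the ones re-used in the new module shown below):

```julia
# Rough sketch of the recommended GPU workflow without ITensorGPU.jl.
using CUDA: CUDA         # loading CUDA.jl enables the CUDA storage backend
using ITensors
using ITensors: cpu, cu  # CPU/GPU storage transfer functions

i = Index(10, "i")
j = Index(10, "j")
A = randomITensor(i, j)  # ordinary CPU tensor
B = randomITensor(j, i)

Agpu = cu(A)             # move tensor storage to the GPU
Bgpu = cu(B)
Cgpu = Agpu * Bgpu       # contraction runs on the GPU
C = cpu(Cgpu)            # bring the result back to CPU memory
```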
src/ITensorGPU.jl: 141 changes (28 additions & 113 deletions)
```diff
@@ -1,114 +1,29 @@
 module ITensorGPU
-using NDTensors
-
-using Adapt
-using CUDA
-using CUDA.CUBLAS
-using CUDA.CUSOLVER
-using Functors
-using ITensors
-using LinearAlgebra
-using Random
-using SimpleTraits
-using StaticArrays
-using Strided
-using TimerOutputs
-using cuTENSOR
-
-using NDTensors: setdata, setstorage, cpu, IsWrappedArray, parenttype
-
-import Adapt: adapt_structure
-import Base: *, permutedims!
-import CUDA: CuArray, CuMatrix, CuVector, cu
-import CUDA.Mem: pin
-import ITensors:
-  randn!,
-  compute_contraction_labels,
-  eigen,
-  tensor,
-  scale!,
-  unioninds,
-  array,
-  matrix,
-  vector,
-  polar,
-  tensors,
-  truncate!,
-  leftlim,
-  rightlim,
-  permute,
-  BroadcastStyle,
-  Indices
-import NDTensors:
-  Atrans,
-  Btrans,
-  CombinerTensor,
-  ContractionProperties,
-  Combiner,
-  Ctrans,
-  Diag,
-  DiagTensor,
-  Dense,
-  DenseTensor,
-  NonuniformDiag,
-  NonuniformDiagTensor,
-  Tensor,
-  UniformDiag,
-  UniformDiagTensor,
-  _contract!!,
-  _contract!,
-  _contract_scalar!,
-  _contract_scalar_noperm!,
-  can_contract,
-  compute_contraction_properties!,
-  contract!!,
-  contract!,
-  contract,
-  contraction_output,
-  contraction_output_type,
-  data,
-  getperm,
-  ind,
-  is_trivial_permutation,
-  outer!,
-  outer!!,
-  permutedims!!,
-  set_eltype,
-  set_ndims,
-  similartype,
-  zero_contraction_output
-import cuTENSOR: cutensorContractionPlan_t, cutensorAlgo_t
-
-#const ContractionPlans = Dict{String, Tuple{cutensorAlgo_t, cutensorContractionPlan_t}}()
-const ContractionPlans = Dict{String,cutensorAlgo_t}()
-
-include("cuarray/set_types.jl")
-include("traits.jl")
-include("adapt.jl")
-include("tensor/cudense.jl")
-include("tensor/dense.jl")
-include("tensor/culinearalgebra.jl")
-include("tensor/cutruncate.jl")
-include("tensor/cucombiner.jl")
-include("tensor/cudiag.jl")
-include("cuitensor.jl")
-include("mps/cumps.jl")
-
-export cu,
-  cpu, cuITensor, randomCuITensor, cuMPS, randomCuMPS, productCuMPS, randomCuMPO, cuMPO
-
-## TODO: Is this needed?
-## const devs = Ref{Vector{CUDAdrv.CuDevice}}()
-## const dev_rows = Ref{Int}(0)
-## const dev_cols = Ref{Int}(0)
-## function __init__()
-## voltas = filter(dev->occursin("V100", CUDAdrv.name(dev)), collect(CUDAdrv.devices()))
-## pascals = filter(dev->occursin("P100", CUDAdrv.name(dev)), collect(CUDAdrv.devices()))
-## devs[] = voltas[1:1]
-## #devs[] = pascals[1:2]
-## CUBLASMG.cublasMgDeviceSelect(CUBLASMG.mg_handle(), length(devs[]), devs[])
-## dev_rows[] = 1
-## dev_cols[] = 1
-## end
-
-end #module
+using CUDA: CUDA
+using ITensors: cpu, cu
+export cpu, cu
+
+using ITensors: ITensor, cpu, cu, randomITensor
+function cuITensor(args...; kwargs...)
+  return cu(ITensor(args...; kwargs...))
+end
+function randomCuITensor(args...; kwargs...)
+  return cu(randomITensor(args...; kwargs...))
+end
+export cuITensor, randomCuITensor
+
+using ITensors.ITensorMPS: MPO, MPS, randomMPS
+function CuMPS(args...; kwargs...)
+  return cu(MPS(args...; kwargs...))
+end
+function productCuMPS(args...; kwargs...)
+  return cu(MPS(args...; kwargs...))
+end
+function randomCuMPS(args...; kwargs...)
+  return cu(randomMPS(args...; kwargs...))
+end
+function CuMPO(args...; kwargs...)
+  return cu(MPO(args...; kwargs...))
+end
+export cuMPO, cuMPS, productCuMPS, randomCuMPO, randomCuMPS
+end
```
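Since the module now only wraps `cu` around the corresponding ITensors constructors, existing user code should keep working, with GPU storage handled by the NDTensors CUDA extension. A hedged usage sketch of the retained shims follows; exact keyword support depends on the underlying ITensors constructors:

```julia
# Hedged sketch of using the backwards-compatibility wrappers kept above.
using ITensors     # for Index, siteinds, etc.
using ITensorGPU   # brings cuITensor, randomCuITensor, randomCuMPS, cpu, cu into scope

i = Index(4, "i")
j = Index(4, "j")
A = randomCuITensor(i, j)      # random ITensor with GPU storage

sites = siteinds("S=1/2", 10)
psi = randomCuMPS(sites)       # random MPS stored on the GPU

A_cpu = cpu(A)                 # transfer back to CPU storage when needed
```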
src/adapt.jl: 33 changes (0 additions & 33 deletions)

This file was deleted.

src/cuarray/set_types.jl: 17 changes (0 additions & 17 deletions)

This file was deleted.

(Diffs for the remaining 9 changed files are not shown.)