From 37a9364a1bb00ac2d54d9f134d940956f567c42a Mon Sep 17 00:00:00 2001
From: Oliver Schulz
Date: Sun, 22 May 2022 13:09:40 +0200
Subject: [PATCH 1/2] Support Adapt.AbstractGPUDevice

---
 lib/cudadrv/devices.jl   | 2 +-
 lib/cudadrv/events.jl    | 3 +++
 lib/cusparse/CUSPARSE.jl | 2 +-
 lib/cusparse/array.jl    | 3 +++
 src/CUDA.jl              | 2 +-
 src/array.jl             | 8 ++++++++
 6 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/lib/cudadrv/devices.jl b/lib/cudadrv/devices.jl
index 543524cd19..89f1d061e6 100644
--- a/lib/cudadrv/devices.jl
+++ b/lib/cudadrv/devices.jl
@@ -9,7 +9,7 @@ export
 
 Get a handle to a compute device.
 """
-struct CuDevice
+struct CuDevice <: AbstractGPUDevice
     handle::CUdevice
 
     function CuDevice(ordinal::Integer)
diff --git a/lib/cudadrv/events.jl b/lib/cudadrv/events.jl
index 8c4192d9c8..87d5e395a8 100644
--- a/lib/cudadrv/events.jl
+++ b/lib/cudadrv/events.jl
@@ -133,3 +133,6 @@ macro elapsed(ex)
         elapsed(t0, t1)
     end
 end
+
+
+Adapt.get_computing_device(e::CuEvent) = device(e.ctx)
diff --git a/lib/cusparse/CUSPARSE.jl b/lib/cusparse/CUSPARSE.jl
index 3fddf946f3..f686fcaa91 100644
--- a/lib/cusparse/CUSPARSE.jl
+++ b/lib/cusparse/CUSPARSE.jl
@@ -11,7 +11,7 @@ using CEnum: @cenum
 using LinearAlgebra
 using LinearAlgebra: HermOrSym
 
-using Adapt: Adapt, adapt
+using Adapt: Adapt, adapt, AbstractGPUDevice, get_computing_device
 
 using SparseArrays
 
diff --git a/lib/cusparse/array.jl b/lib/cusparse/array.jl
index 34d52c0a85..ebbf47832d 100644
--- a/lib/cusparse/array.jl
+++ b/lib/cusparse/array.jl
@@ -15,6 +15,9 @@ const AbstractCuSparseMatrix{Tv, Ti} = AbstractCuSparseArray{Tv, Ti, 2}
 Base.convert(T::Type{<:AbstractCuSparseArray}, m::AbstractArray) =
     m isa T ? m : T(m)
 
+Adapt.get_computing_device(A::AbstractCuSparseArray) = get_computing_device(A.nzVal)
+
+
 mutable struct CuSparseVector{Tv, Ti} <: AbstractCuSparseVector{Tv, Ti}
     iPtr::CuVector{Ti}
     nzVal::CuVector{Tv}
diff --git a/src/CUDA.jl b/src/CUDA.jl
index 395f062d6c..64f5bff2ca 100644
--- a/src/CUDA.jl
+++ b/src/CUDA.jl
@@ -8,7 +8,7 @@ using LLVM
 using LLVM.Interop
 using Core: LLVMPtr
 
-using Adapt: Adapt, adapt, WrappedArray
+using Adapt: Adapt, adapt, WrappedArray, AbstractGPUDevice, get_computing_device
 
 using Requires: @require
 
diff --git a/src/array.jl b/src/array.jl
index c72cca5ce7..1df2929ada 100644
--- a/src/array.jl
+++ b/src/array.jl
@@ -245,6 +245,14 @@ function device(A::CuArray)
 end
 
 
+Adapt.get_computing_device(A::CuArray) = device(A)
+
+Adapt.adapt_storage(dev::CuDevice, x) = device!(() -> Adapt.adapt_storage(CuArray, x), dev)
+
+Sys.total_memory(dev::CuDevice) = CUDA.totalmem(dev)
+Sys.free_memory(dev::CuDevice) = unsigned(CUDA.device!(CUDA.available_memory, dev))
+
+
 ## derived types
 
 export DenseCuArray, DenseCuVector, DenseCuMatrix, DenseCuVecOrMat,

From 9bbc01e06afcedd66c8dd054aa5ed1a8b21f1430 Mon Sep 17 00:00:00 2001
From: Oliver Schulz
Date: Thu, 26 May 2022 13:50:34 +0200
Subject: [PATCH 2/2] Adapt to compute unit handling changes in Adapt

---
 lib/cudadrv/events.jl    | 2 +-
 lib/cusparse/CUSPARSE.jl | 3 ++-
 lib/cusparse/array.jl    | 2 +-
 src/CUDA.jl              | 3 ++-
 src/array.jl             | 3 +--
 5 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/lib/cudadrv/events.jl b/lib/cudadrv/events.jl
index 87d5e395a8..2d7e838e2a 100644
--- a/lib/cudadrv/events.jl
+++ b/lib/cudadrv/events.jl
@@ -135,4 +135,4 @@ macro elapsed(ex)
 end
 
 
-Adapt.get_computing_device(e::CuEvent) = device(e.ctx)
+Adapt.get_compute_unit_impl(@nospecialize(TypeHistory::Type), e::CuEvent) = device(e.ctx)
diff --git a/lib/cusparse/CUSPARSE.jl b/lib/cusparse/CUSPARSE.jl
index f686fcaa91..8b84303c25 100644
--- a/lib/cusparse/CUSPARSE.jl
+++ b/lib/cusparse/CUSPARSE.jl
@@ -11,7 +11,8 @@ using CEnum: @cenum
 using LinearAlgebra
 using LinearAlgebra: HermOrSym
 
-using Adapt: Adapt, adapt, AbstractGPUDevice, get_computing_device
+import Adapt
+using Adapt: Adapt, adapt, AbstractGPUDevice
 
 using SparseArrays
 
diff --git a/lib/cusparse/array.jl b/lib/cusparse/array.jl
index ebbf47832d..333ef13776 100644
--- a/lib/cusparse/array.jl
+++ b/lib/cusparse/array.jl
@@ -15,7 +15,7 @@ const AbstractCuSparseMatrix{Tv, Ti} = AbstractCuSparseArray{Tv, Ti, 2}
 Base.convert(T::Type{<:AbstractCuSparseArray}, m::AbstractArray) =
     m isa T ? m : T(m)
 
-Adapt.get_computing_device(A::AbstractCuSparseArray) = get_computing_device(A.nzVal)
+Adapt.get_compute_unit_impl(@nospecialize(TypeHistory::Type), A::AbstractCuSparseArray) = device(A.nzVal)
 
 
 mutable struct CuSparseVector{Tv, Ti} <: AbstractCuSparseVector{Tv, Ti}
diff --git a/src/CUDA.jl b/src/CUDA.jl
index 64f5bff2ca..7ad5508c6a 100644
--- a/src/CUDA.jl
+++ b/src/CUDA.jl
@@ -8,7 +8,8 @@ using LLVM
 using LLVM.Interop
 using Core: LLVMPtr
 
-using Adapt: Adapt, adapt, WrappedArray, AbstractGPUDevice, get_computing_device
+import Adapt
+using Adapt: Adapt, adapt, WrappedArray, AbstractGPUDevice
 
 using Requires: @require
 
diff --git a/src/array.jl b/src/array.jl
index 1df2929ada..e4e0f8b1b3 100644
--- a/src/array.jl
+++ b/src/array.jl
@@ -244,8 +244,7 @@ function device(A::CuArray)
     return device(A.storage.buffer.ctx)
 end
 
-
-Adapt.get_computing_device(A::CuArray) = device(A)
+Adapt.get_compute_unit_impl(@nospecialize(TypeHistory::Type), A::CuArray) = device(A)
 
 Adapt.adapt_storage(dev::CuDevice, x) = device!(() -> Adapt.adapt_storage(CuArray, x), dev)
 
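
Note for reviewers: the sketch below illustrates how the new device handling is
intended to be used once patch 2/2 is applied. It assumes the proposed
counterpart API on the Adapt.jl side, in particular a public
Adapt.get_compute_unit entry point that dispatches to the
get_compute_unit_impl methods defined here; that entry point is not part of
this patch series and its name is an assumption.

    using CUDA, Adapt

    dev = CuDevice(0)                        # handle to the first CUDA device

    # Adapt.adapt_storage(dev::CuDevice, x) switches to `dev` before uploading,
    # so adapt() can move host data onto a specific device:
    A = adapt(dev, rand(Float32, 16, 16))    # CuArray living on device 0

    # Query which compute unit holds the data (assumes the proposed public
    # wrapper around the get_compute_unit_impl methods added in this patch):
    Adapt.get_compute_unit(A)                # expected to return dev

    # Device memory introspection via the new Sys extensions:
    Sys.total_memory(dev)                    # total device memory, in bytes
    Sys.free_memory(dev)                     # currently available device memory

Routing adapt_storage through device!(f, dev) runs the upload with `dev`
active and then restores the previous device, so adapting to a CuDevice does
not permanently switch the task-local device.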