Move NDTensors module into ITensors module (#650)
mtfishman committed May 7, 2021
1 parent 092f67b commit 02e3bd0
Showing 48 changed files with 8,406 additions and 39 deletions.
9 changes: 5 additions & 4 deletions Project.toml
@@ -1,27 +1,28 @@
name = "ITensors"
uuid = "9136182c-28ba-11e9-034c-db9fb085ebd5"
authors = ["Matthew Fishman <mfishman@flatironinstitute.org>",
"Miles Stoudenmire <mstoudenmire@flatironinstitute.org>"]
authors = ["Matthew Fishman <mfishman@flatironinstitute.org>", "Miles Stoudenmire <mstoudenmire@flatironinstitute.org>"]
version = "0.2.0"

[deps]
Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d"
Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Requires = "ae029012-a4dd-5104-9daa-d747884805df"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
Strided = "5e0ebb24-38b0-5f93-81fe-25c709ecae67"
TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
TupleTools = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6"

[compat]
Compat = "2.1, 3"
HDF5 = "0.14,0.15"
KrylovKit = "0.4.2, 0.5"
NDTensors = "= 0.1.32"
PackageCompiler = "1.0.0"
StaticArrays = "0.12, 1.0"
TimerOutputs = "0.5.5"
4 changes: 0 additions & 4 deletions benchmark/Project.toml
@@ -1,7 +1,3 @@
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"

[compat]
NDTensors = "= 0.1.32"
4 changes: 0 additions & 4 deletions src/ContractionSequenceOptimization/ContractionSequenceOptimization.jl
@@ -1,10 +1,6 @@

module ContractionSequenceOptimization

using NDTensors

import NDTensors: dim

export optimal_contraction_sequence, contraction_cost

include("utils.jl")
4 changes: 4 additions & 0 deletions src/ContractionSequenceOptimization/utils.jl
@@ -21,6 +21,10 @@
# 64 elements in the set.
# (See https://discourse.julialang.org/t/parse-an-array-of-bits-bitarray-to-an-integer/42361/11).

# Previously we used the definition in NDTensors:
#import NDTensors: dim
import ITensors: dim

# `is` could be Vector{Int} for BitSet
function dim(is::IndexSetT, ind_dims::Vector) where {IndexSetT<:Union{Vector{Int},BitSet}}
isempty(is) && return one(eltype(ind_dims))
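The hunk above is truncated, but from the signature and the empty-set base case, `dim` evidently accumulates the product of the dimensions `ind_dims[i]` for each index `i` in `is`, now using the `dim` imported from ITensors rather than from NDTensors. A hypothetical call under that assumption:

```julia
# Hypothetical usage, assuming `dim` multiplies the dimensions of the
# indices selected by `is` (here indices 1 and 3, with dimensions [2, 3, 4]):
ind_dims = [2, 3, 4]
dim(BitSet([1, 3]), ind_dims)  # == 2 * 4 == 8 under that assumption
dim(Int[], ind_dims)           # == 1, the empty-product base case
```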
5 changes: 2 additions & 3 deletions src/ITensors.jl
@@ -14,7 +14,6 @@ using Compat
using HDF5
using KrylovKit
using LinearAlgebra
using NDTensors
using PackageCompiler
using Pkg
using Printf
@@ -23,10 +22,10 @@ using StaticArrays
using TimerOutputs

#####################################
# NDTensors (definitions that will be moved to NDTensors
# module)
# NDTensors
#
include("NDTensors/NDTensors.jl")
using .NDTensors

#####################################
# ContractionSequenceOptimization
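For downstream code, the practical effect of this hunk is that NDTensors is no longer a separate package dependency: it is `include`d and loaded as a submodule with `using .NDTensors`. A minimal sketch of how a caller can reach it after this change (the `const` alias is illustrative, not part of this commit):

```julia
using ITensors

# NDTensors now ships inside ITensors, so it is reachable
# through the parent module rather than via `using NDTensors`:
const NDTensors = ITensors.NDTensors

# Hypothetical usage: build a 2x3 dense tensor through the submodule.
T = NDTensors.Tensor(NDTensors.Dense(zeros(6)), (2, 3))
```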
186 changes: 181 additions & 5 deletions src/NDTensors/NDTensors.jl
@@ -1,10 +1,186 @@
module NDTensors

using Base.Threads
using Compat
using Dictionaries
using Random
using LinearAlgebra
using StaticArrays
using HDF5
using Requires
using Strided
using TimerOutputs
using TupleTools

using Base: @propagate_inbounds, ReshapedArray

using Base.Cartesian: @nexprs

using Base.Threads: @spawn

#####################################
# Imports and exports
#
include("exports.jl")
include("imports.jl")

#####################################
# DenseTensor and DiagTensor
#
# Definitions to move to NDTensors
include("aliasstyle.jl")
include("similar.jl")
include("tupletools.jl")
include("dims.jl")
include("tensorstorage.jl")
include("tensor.jl")
include("contraction_logic.jl")
include("dense.jl")
include("symmetric.jl")
include("linearalgebra.jl")
include("diag.jl")
include("combiner.jl")
include("truncate.jl")
include("svd.jl")

#####################################
# BlockSparseTensor
#
include("blocksparse/blockdims.jl")
include("blocksparse/block.jl")
include("blocksparse/blockoffsets.jl")
include("blocksparse/blocksparse.jl")
include("blocksparse/blocksparsetensor.jl")
include("blocksparse/diagblocksparse.jl")
include("blocksparse/combiner.jl")
include("blocksparse/linearalgebra.jl")

#####################################
# Empty
#
include("empty.jl")

#####################################
# Deprecations
#
include("deprecated.jl")

#####################################
# A global timer used with TimerOutputs.jl
#

const timer = TimerOutput()

#####################################
# Optional block sparse multithreading
#

include("blas_get_num_threads.jl")

const _using_threaded_blocksparse = Ref(false)

function enable_threaded_blocksparse_docstring(module_name)
return """
$(module_name).enable_threaded_blocksparse()
$(module_name).disable_threaded_blocksparse()
Enable or disable block sparse multithreading.
Returns the current state of `$(module_name).using_threaded_blocksparse()`, i.e. `true` if threaded block sparse was previously enabled, and `false` if threaded block sparse was previously disabled. This is helpful for turning block sparse threading on or off temporarily. For example:
```julia
using_threaded_blocksparse = $(module_name).enable_threaded_blocksparse()
# Run code that you want to be threaded
if !using_threaded_blocksparse
$(module_name).disable_threaded_blocksparse()
end
```
Note that you need to start Julia with multiple threads. For example, to start Julia with 4 threads, you can use any of the following:
```
\$ julia --threads=4
\$ julia -t 4
\$ JULIA_NUM_THREADS=4 julia
```
In addition, we have found that it is best to disable `BLAS` and `Strided` multithreading when using block sparse multithreading. You can do that with the commands `using LinearAlgebra; BLAS.set_num_threads(1)` and `$(module_name).Strided.disable_threads()`.
See also: `$(module_name).enable_threaded_blocksparse`, `$(module_name).disable_threaded_blocksparse`, `$(module_name).using_threaded_blocksparse`.
"""
end

function _enable_threaded_blocksparse()
current_using_threaded_blocksparse = using_threaded_blocksparse()
if !current_using_threaded_blocksparse
if Threads.nthreads() == 1
println(
"WARNING: You are trying to enable block sparse multithreading, but you have started Julia with only a single thread. You can start Julia with `N` threads with `julia -t N`, and check the number of threads Julia can use with `Threads.nthreads()`. Your system has $(Sys.CPU_THREADS) threads available to use, which you can determine by running `Sys.CPU_THREADS`.\n",
)
end
if blas_get_num_threads() > 1 && Threads.nthreads() > 1
println(
"WARNING: You are enabling block sparse multithreading, but BLAS $(BLAS.vendor()) is currently set to use $(blas_get_num_threads()) threads. When using block sparse multithreading, we recommend setting BLAS to use only a single thread, otherwise you may see suboptimal performance. You can set it with `using LinearAlgebra; BLAS.set_num_threads(1)`.\n",
)
end
if Strided.get_num_threads() > 1
println(
"WARNING: You are enabling block sparse multithreading, but Strided.jl is currently set to use $(Strided.get_num_threads()) threads for performing dense tensor permutations. When using block sparse multithreading, we recommend setting Strided.jl to use only a single thread, otherwise you may see suboptimal performance. You can set it with `NDTensors.Strided.disable_threads()` and see the current number of threads it is using with `NDTensors.Strided.get_num_threads()`.\n",
)
end
_using_threaded_blocksparse[] = true
end
return current_using_threaded_blocksparse
end

function _disable_threaded_blocksparse()
current_using_threaded_blocksparse = using_threaded_blocksparse()
if current_using_threaded_blocksparse
_using_threaded_blocksparse[] = false
end
return current_using_threaded_blocksparse
end

"""
$(enable_threaded_blocksparse_docstring(@__MODULE__))
"""
using_threaded_blocksparse() = _using_threaded_blocksparse[]

"""
$(enable_threaded_blocksparse_docstring(@__MODULE__))
"""
enable_threaded_blocksparse() = _enable_threaded_blocksparse()

"""
$(enable_threaded_blocksparse_docstring(@__MODULE__))
"""
disable_threaded_blocksparse() = _disable_threaded_blocksparse()

#####################################
# Optional TBLIS contraction backend
#

const _using_tblis = Ref(false)

using_tblis() = _using_tblis[]

function enable_tblis()
_using_tblis[] = true
return nothing
end

function disable_tblis()
_using_tblis[] = false
return nothing
end

storage(T::Tensor) = store(T)
function __init__()
@require TBLIS = "48530278-0828-4a49-9772-0f3830dfa1e9" begin
enable_tblis()
include("tblis.jl")
end
@require Octavian = "6fd5a793-0b7e-452c-907f-f8bfe9c57db4" begin
include("octavian.jl")
end
end

#function NDTensors.blockdims(inds::IndexSet, block::Tuple)
# return ntuple(i -> blockdim(inds,block,i), ValLength(block))
#end
end # module NDTensors
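The `__init__` function above uses Requires.jl so that the TBLIS and Octavian backends are wired up only when a user actually loads those packages. A minimal sketch of the same pattern with a placeholder package `MyBackend` (the name and UUID are stand-ins, not a real dependency):

```julia
module OptionalBackendDemo

using Requires

const _backend_enabled = Ref(false)

function __init__()
    # This body only runs if `import MyBackend` happens in the session;
    # the UUID below is a placeholder, not a registered package.
    @require MyBackend = "00000000-0000-0000-0000-000000000001" begin
        _backend_enabled[] = true
    end
end

end # module OptionalBackendDemo
```

This is the same mechanism that keeps TBLIS out of the hard dependency list while still letting `using_tblis()` flip to `true` when the package is available.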
34 changes: 34 additions & 0 deletions src/NDTensors/aliasstyle.jl
@@ -0,0 +1,34 @@
"""
AliasStyle
A trait that determines the aliasing behavior of a constructor or function,
for example whether or not a function or constructor might return an alias
of one of the inputs (i.e. the output shares memory with one of the inputs,
such that modifying the output also modifies the input or vice versa).
See also [`AllowAlias`](@ref) and [`NeverAlias`](@ref).
"""
abstract type AliasStyle end

"""
AllowAlias
Singleton type used in a constructor or function indicating
that the constructor or function may return an alias of the input data when
possible, i.e. the output may share data with the input. For a constructor
`T(AllowAlias(), args...)`, this would act like `Base.convert(T, args...)`.
See also [`AliasStyle`](@ref) and [`NeverAlias`](@ref).
"""
struct AllowAlias <: AliasStyle end

"""
NeverAlias
Singleton type used in a constructor or function indicating
that the constructor or function will never return an alias of the input data,
i.e. the output will never share data with one of the inputs.
See also [`AliasStyle`](@ref) and [`AllowAlias`](@ref).
"""
struct NeverAlias <: AliasStyle end
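A minimal sketch of how these singletons are meant to be used, dispatching a hypothetical `maybe_copy` helper (not part of this file) on the trait:

```julia
# AllowAlias returns the input itself (shared memory);
# NeverAlias returns a defensive copy.
maybe_copy(::AllowAlias, a::AbstractArray) = a
maybe_copy(::NeverAlias, a::AbstractArray) = copy(a)

v = [1, 2, 3]
w = maybe_copy(AllowAlias(), v)
w[1] = 10   # also mutates v, since w aliases v
u = maybe_copy(NeverAlias(), v)
u[2] = 20   # v is unchanged
```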
40 changes: 40 additions & 0 deletions src/NDTensors/blas_get_num_threads.jl
@@ -0,0 +1,40 @@

#
# blas_get_num_threads()
# Get the number of BLAS threads
# This can be replaced by BLAS.get_num_threads() in Julia v1.6
#

function guess_vendor()
# like determine_vendor, but guesses blas in some cases
# where determine_vendor returns :unknown
ret = BLAS.vendor()
if Sys.isapple() && (ret == :unknown)
ret = :osxblas
end
return ret
end

_tryparse_env_int(key) = tryparse(Int, get(ENV, key, ""))

blas_get_num_threads()::Union{Int,Nothing} = _get_num_threads()

function _get_num_threads(; _blas=guess_vendor())::Union{Int,Nothing}
if _blas === :openblas || _blas === :openblas64
return Int(ccall((BLAS.@blasfunc(openblas_get_num_threads), BLAS.libblas), Cint, ()))
elseif _blas === :mkl
return Int(ccall((:mkl_get_max_threads, BLAS.libblas), Cint, ()))
elseif _blas === :osxblas
key = "VECLIB_MAXIMUM_THREADS"
nt = _tryparse_env_int(key)
if nt === nothing
@warn "Failed to read environment variable $key" maxlog = 1
else
return nt
end
else
@assert _blas === :unknown
end
@warn "Could not get number of BLAS threads. Returning `nothing` instead." maxlog = 1
return nothing
end

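A quick way to sanity-check this helper from the REPL, assuming ITensors (and thus its NDTensors submodule) is loaded; the result depends on the local BLAS vendor and thread settings:

```julia
using ITensors
using LinearAlgebra

# Returns an Int for OpenBLAS/MKL/Accelerate, or `nothing` when the
# BLAS vendor cannot be determined:
nthreads = ITensors.NDTensors.blas_get_num_threads()

# On Julia >= 1.6 the same information is available directly:
# nthreads = BLAS.get_num_threads()
```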