Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 0 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,4 @@ ConcurrentCollections.jl provides the following lock-free collections for Julia
* [`ConcurrentQueue`](https://tkf.github.io/ConcurrentCollections.jl/dev/#ConcurrentCollections.ConcurrentQueue)
* [`ConcurrentStack`](https://tkf.github.io/ConcurrentCollections.jl/dev/#ConcurrentCollections.ConcurrentStack)
* [`WorkStealingDeque`](https://tkf.github.io/ConcurrentCollections.jl/dev/#ConcurrentCollections.WorkStealingDeque)

Experimental/unstable:

* [`ConcurrentDict`](https://tkf.github.io/ConcurrentCollections.jl/dev/#ConcurrentCollections.ConcurrentDict)
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ using BenchmarkTools: Benchmark, BenchmarkGroup

include("utils.jl")
include("bench_dict_histogram.jl")
include("bench_dict_haskey.jl")
include("bench_dict_get_existing.jl")
include("bench_dict_migration.jl")
include("bench_queue_pushpop.jl")
Expand Down
64 changes: 64 additions & 0 deletions benchmark/ConcurrentCollectionsBenchmarks/src/bench_dict_haskey.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
# Benchmark suite comparing `haskey` performance of `Base.Dict` vs.
# `ConcurrentDict` across different hit rates (the fraction of queried
# keys that actually exist in the dictionary).
module BenchDictHasKey

using BenchmarkTools
using ConcurrentCollections

# Build the benchmark fixtures.
#
# Returns a named tuple with:
# * `cdict`  -- `ConcurrentDict` mapping `datasize` string keys to `UInt64` values
# * `ks_100` -- `nkeys` keys that all exist in `cdict` (100% hit rate)
# * `ks_000` -- `nkeys` keys absent from `cdict` (0% hit rate)
# * `ks_050` -- `nkeys` keys alternating between existing and missing (50% hit rate)
function generate(;
    datasize = 2^13, # `Base.Dict` is better on smaller size
    keysize = 50, # expensive isequal; favors ConcurrentDict
    nkeys = 100,
)
    vs = UInt64.(1:datasize)
    # Zero-padded decimal strings of width `keysize`; long keys make key
    # comparison (`isequal`) comparatively expensive.
    ks = string.(vs; pad = keysize)
    # Alternative key types, kept for manual experimentation:
    # ks = vs
    # ks = UInt32.(vs)
    cdict = ConcurrentDict{eltype(ks),eltype(vs)}(zip(ks, vs))
    ks_100 = ks[1:nkeys]
    # Negating the unsigned values wraps to huge numbers whose (unpadded)
    # string forms cannot equal any padded key in `ks`, so these always miss.
    ks_000 = string.(.-vs[1:nkeys])
    # Element-wise pick: odd positions take an existing key, even a missing one.
    ks_050 = ifelse.(isodd.(vs[1:nkeys]), ks_100, ks_000)
    return (; cdict, ks_100, ks_000, ks_050)
end

# Keeps the generated fixtures alive between `setup` and benchmark runs so the
# `@benchmarkable` setup expressions can fetch them by property name.
const CACHE = Ref{Any}()

# Build a `BenchmarkGroup` with one sub-group per requested hit-rate case, each
# containing a sequential `haskey` benchmark for `Base.Dict` ("base-seq") and
# for `ConcurrentDict` ("cdict-seq").
#
# Keyword arguments:
# * `cases`: key sets to benchmark (subset of `:ks_100`, `:ks_050`, `:ks_000`)
# * remaining `kwargs` are forwarded to `generate`
function setup(; cases = [:ks_050, :ks_000], kwargs...)
    data = generate(; kwargs...)
    (; cdict) = data
    # Baseline: an equivalent `Base.Dict` built from the same key/value pairs.
    dict = Dict(cdict)
    CACHE[] = (; dict, data...)

    labelmap = Dict(
        :ks_100 => "100% existing",
        :ks_050 => "50% existing", # `Base.Dict` is better with 50% hit
        :ks_000 => "0% existing",
    )

    suite = BenchmarkGroup()
    for ksprop in cases
        s1 = suite[labelmap[ksprop]] = BenchmarkGroup()
        ks = getproperty(data, ksprop)
        # The type assertions on the `CACHE` loads recover a concrete type
        # despite the `Ref{Any}` cache, keeping the benchmarked code
        # type-stable. `$ksprop`/`$(typeof(...))` are interpolated at suite
        # construction time.
        s1["base-seq"] = @benchmarkable(
            count(k -> haskey(dict, k), ks),
            setup = begin
                dict = CACHE[].dict::$(typeof(dict))
                ks = CACHE[].$ksprop::$(typeof(ks))
            end,
            evals = 1,
        )
        s1["cdict-seq"] = @benchmarkable(
            count(k -> haskey(dict, k), ks),
            setup = begin
                dict = CACHE[].cdict::$(typeof(cdict))
                ks = CACHE[].$ksprop::$(typeof(ks))
            end,
            evals = 1,
        )
    end
    return suite
end

# Drop the cached fixtures so they can be garbage-collected
# (e.g. after the benchmarks have run).
function clear()
    CACHE[] = nothing
end

end # module
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,9 @@ function hist_seq!(dict::ConcurrentDict, data)
end

function hist_parallel!(dict::ConcurrentDict, data; ntasks = Threads.nthreads())
# for k in data
# dict[k] = 0
# end
@sync for chunk in Iterators.partition(data, cld(length(data), ntasks))
Threads.@spawn hist_seq!(dict, chunk)
end
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,16 @@ module BenchDictMigration

using BenchmarkTools
using ConcurrentCollections
using ConcurrentCollections.Implementations: LINEAR_PROBING_DICT_EXPAND_BASESIZE, migrate!
using ConcurrentCollections.Implementations:
LINEAR_PROBING_DICT_EXPAND_BASESIZE, migrate_serial!, new_slots_and_pairnodes

pad16(x) = string(x; pad = 16)

function generate(f = pad16; datasize = LINEAR_PROBING_DICT_EXPAND_BASESIZE[])
vs = UInt64.(1:datasize)
ks = f.(vs)
dict = ConcurrentDict{eltype(ks),eltype(vs)}(zip(ks, vs))
return dict.slots
return dict
end

const CACHE = Ref{Any}()
Expand All @@ -25,12 +26,15 @@ function setup(; generate_options...)

suite = BenchmarkGroup()
for key in keys(CACHE[])
SlotsType = typeof(CACHE[][key])
CacheType = typeof(CACHE[][key])
suite[key] = @benchmarkable(
migrate!(newslots, slots),
migrate_serial!(newslots, newpairnodes, slots, pairnodes),
setup = begin
slots = copy(CACHE[][$key]::$SlotsType)
newslots = similar(slots, length(slots) * 2)
dict = CACHE[][$key]::$CacheType
slots = copy(dict.slots)
pairnodes = copy(dict.pairnodes)
newslots, newpairnodes =
new_slots_and_pairnodes(slots, pairnodes, true)
end,
evals = 1,
)
Expand Down
9 changes: 2 additions & 7 deletions docs/src/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,22 +3,17 @@
```@index
```

## Collections
## Queue/stack

```@docs
ConcurrentQueue
ConcurrentStack
WorkStealingDeque
```

## Functions

```@docs
trypop!
trypopfirst!
```

## Experimental
## Hash table

```@docs
ConcurrentDict
Expand Down
53 changes: 0 additions & 53 deletions src/atomicsutils.jl
Original file line number Diff line number Diff line change
Expand Up @@ -9,59 +9,6 @@ end
return Ptr{UInt}(pointer_from_objref(obj) + offset)
end

@inline atomic_getfield(obj, field::Val) = atomic_getfield(obj, field, seq_cst)
@inline function atomic_getfield(obj, field::Val, order)
i = something(fieldindex(obj, field))
offset = fieldoffset(typeof(obj), i)
fptr = Ptr{UInt}(pointer_from_objref(obj) + offset)
GC.@preserve obj begin
uint = UnsafeAtomics.load(fptr, order)
end
t = fieldtype(typeof(obj), i)
value = unsafe_pointer_to_objref(Ptr{Cvoid}(uint))
return value::t
end
# TODO: support immutables

@inline atomic_setfield!(obj, field::Val, value) =
atomic_setfield!(obj, field, value, seq_cst)
@inline function atomic_setfield!(obj, field::Val, value, order)
if Base.issingletontype(typeof(value))
UnsafeAtomics.store!(
fieldpointer(obj, field),
UInt(pointer_from_singleton(value)),
order,
)
else
fptr = fieldpointer(obj, field)
ref = Ref{Any}(value)
GC.@preserve obj ref begin
vint = unsafe_load(Ptr{UInt}(pointer_from_objref(ref)))
UnsafeAtomics.store!(fptr, vint, order)
end
end
end
# TODO: support immutables

@inline atomic_casfield!(obj, field::Val, cmp, new) =
atomic_casfield!(obj, field, cmp, new, acq_rel, acquire)
@inline function atomic_casfield!(
obj,
field::Val,
cmp,
new,
success_ordering,
failure_ordering,
)
fptr = fieldpointer(obj, field)
cmpint = UInt(_pointer_from_objref(cmp))
newint = UInt(_pointer_from_objref(new))
GC.@preserve obj cmp new begin
found = UnsafeAtomics.cas!(fptr, cmpint, newint, success_ordering, failure_ordering)
end
return found == cmpint
end

@inline atomic_modifyfield!(obj, field::Val, op, x) =
atomic_modifyfield!(obj, field, op, x, seq_cst)
@inline function atomic_modifyfield!(obj, field::Val, op::OP, x, order) where {OP}
Expand Down
Loading