Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
37 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 46 additions & 0 deletions BitFaster.Caching.Benchmarks/AsyncLruAtomicBench.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Jobs;
using BitFaster.Caching.Lru;

namespace BitFaster.Caching.Benchmarks
{
[SimpleJob(RuntimeMoniker.Net48)]
[SimpleJob(RuntimeMoniker.Net60)]
[DisassemblyDiagnoser(printSource: true, maxDepth: 5)]
[MemoryDiagnoser]
public class AsyncLruAtomicBench
{
    // Raw dictionary GetOrAdd as a lower-bound reference point.
    private static readonly ConcurrentDictionary<int, int> dictionary = new ConcurrentDictionary<int, int>(8, 9, EqualityComparer<int>.Default);

    // Plain LRU without any value wrapping; serves as the baseline.
    private static readonly ConcurrentLru<int, int> concurrentLru = new ConcurrentLru<int, int>(8, 9, EqualityComparer<int>.Default);

    // LRU whose values are wrapped in AsyncAtomic, to measure the wrapper's overhead.
    private static readonly ConcurrentLru<int, AsyncAtomic<int, int>> atomicConcurrentLru = new ConcurrentLru<int, AsyncAtomic<int, int>>(8, 9, EqualityComparer<int>.Default);

    [Benchmark]
    public void ConcurrentDictionary()
    {
        Func<int, int> valueFactory = k => k;
        dictionary.GetOrAdd(1, valueFactory);
    }

    [Benchmark(Baseline = true)]
    public async Task ConcurrentLruAsync()
    {
        Func<int, Task<int>> valueFactory = k => Task.FromResult(k);
        await concurrentLru.GetOrAddAsync(1, valueFactory).ConfigureAwait(false);
    }

    [Benchmark]
    public async Task AtomicConcurrentLruAsync()
    {
        Func<int, Task<int>> valueFactory = k => Task.FromResult(k);
        await atomicConcurrentLru.GetOrAddAsync(1, valueFactory).ConfigureAwait(false);
    }
}
}
55 changes: 55 additions & 0 deletions BitFaster.Caching.Benchmarks/AtomicLruBench.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Jobs;
using BitFaster.Caching.Lru;

namespace BitFaster.Caching.Benchmarks
{
[SimpleJob(RuntimeMoniker.Net48)]
[SimpleJob(RuntimeMoniker.Net60)]
[DisassemblyDiagnoser(printSource: true, maxDepth: 5)]
[MemoryDiagnoser]
public class AtomicLruBench
{
    // Raw dictionary GetOrAdd as a lower-bound reference point.
    private static readonly ConcurrentDictionary<int, int> dictionary = new ConcurrentDictionary<int, int>(8, 9, EqualityComparer<int>.Default);

    // Plain LRU without any value wrapping; serves as the baseline.
    private static readonly ConcurrentLru<int, int> concurrentLru = new ConcurrentLru<int, int>(8, 9, EqualityComparer<int>.Default);

    // LRU whose values are wrapped in Atomic, to measure the wrapper's overhead.
    private static readonly ConcurrentLru<int, Atomic<int, int>> atomicConcurrentLru = new ConcurrentLru<int, Atomic<int, int>>(8, 9, EqualityComparer<int>.Default);

    // LRU over Lazy<T>, for comparison against the Atomic wrapper.
    private static readonly ConcurrentLru<int, Lazy<int>> lazyConcurrentLru = new ConcurrentLru<int, Lazy<int>>(8, 9, EqualityComparer<int>.Default);

    [Benchmark]
    public void ConcurrentDictionary()
    {
        Func<int, int> valueFactory = k => k;
        dictionary.GetOrAdd(1, valueFactory);
    }

    [Benchmark(Baseline = true)]
    public void ConcurrentLru()
    {
        Func<int, int> valueFactory = k => k;
        concurrentLru.GetOrAdd(1, valueFactory);
    }

    [Benchmark]
    public void AtomicConcurrentLru()
    {
        Func<int, int> valueFactory = k => k;
        atomicConcurrentLru.GetOrAdd(1, valueFactory);
    }

    [Benchmark]
    public void LazyConcurrentLru()
    {
        Func<int, Lazy<int>> valueFactory = k => new Lazy<int>(() => k);
        lazyConcurrentLru.GetOrAdd(1, valueFactory);
    }
}
}
87 changes: 87 additions & 0 deletions BitFaster.Caching.Benchmarks/ScopedLruExtBench.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Jobs;
using BitFaster.Caching.Lru;

namespace BitFaster.Caching.Benchmarks
{
// Captured results:
//| Method                                  | Mean       | Error     | StdDev    | Ratio | RatioSD | Code Size | Gen 0  | Allocated |
//|---------------------------------------- |-----------:|----------:|----------:|------:|--------:|----------:|-------:|----------:|
//| ConcurrentDictionary                    |   8.791 ns | 0.0537 ns | 0.0476 ns |  0.48 |    0.00 |     396 B |      - |         - |
//| ConcurrentLru                           |  18.429 ns | 0.1539 ns | 0.1440 ns |  1.00 |    0.00 |     701 B |      - |         - |
//| ScopedConcurrentLruNativeFunc           | 117.665 ns | 1.4390 ns | 1.3461 ns |  6.39 |    0.10 |     662 B | 0.0389 |     168 B |
//| ScopedConcurrentLruWrappedFunc          | 132.697 ns | 0.6867 ns | 0.5734 ns |  7.19 |    0.08 |     565 B | 0.0610 |     264 B |
//| ScopedConcurrentLruWrappedFuncProtected | 133.997 ns | 0.5089 ns | 0.4249 ns |  7.26 |    0.05 |     621 B | 0.0610 |     264 B |
[SimpleJob(RuntimeMoniker.Net48)]
[SimpleJob(RuntimeMoniker.Net60)]
[DisassemblyDiagnoser(printSource: true, maxDepth: 5)]
[MemoryDiagnoser]
public class ScopedLruExtBench
{
    // Raw dictionary GetOrAdd as a lower-bound reference point.
    private static readonly ConcurrentDictionary<int, SomeDisposable> dictionary = new ConcurrentDictionary<int, SomeDisposable>(8, 9, EqualityComparer<int>.Default);

    // Plain LRU without scoping; serves as the baseline.
    private static readonly ConcurrentLru<int, SomeDisposable> concurrentLru = new ConcurrentLru<int, SomeDisposable>(8, 9, EqualityComparer<int>.Default);

    // LRU over Scoped<T>, exercised via the Scoped* extension methods below.
    private static readonly ConcurrentLru<int, Scoped<SomeDisposable>> scopedConcurrentLru = new ConcurrentLru<int, Scoped<SomeDisposable>>(8, 9, EqualityComparer<int>.Default);

    [Benchmark]
    public SomeDisposable ConcurrentDictionary()
    {
        Func<int, SomeDisposable> valueFactory = k => new SomeDisposable();
        return dictionary.GetOrAdd(1, valueFactory);
    }

    [Benchmark(Baseline = true)]
    public SomeDisposable ConcurrentLru()
    {
        Func<int, SomeDisposable> valueFactory = k => new SomeDisposable();
        return concurrentLru.GetOrAdd(1, valueFactory);
    }

    [Benchmark]
    public SomeDisposable ScopedConcurrentLruNativeFunc()
    {
        // Factory produces the cached object directly (a Scoped wrapping the item).
        Func<int, Scoped<SomeDisposable>> valueFactory = k => new Scoped<SomeDisposable>(new SomeDisposable());
        using (var lifetime = scopedConcurrentLru.ScopedGetOrAdd(1, valueFactory))
        {
            return lifetime.Value;
        }
    }

    [Benchmark]
    public SomeDisposable ScopedConcurrentLruWrappedFunc()
    {
        // Factory produces the bare item; the extension method allocates a closure to wrap it in Scoped.
        Func<int, SomeDisposable> valueFactory = k => new SomeDisposable();
        using (var lifetime = scopedConcurrentLru.ScopedGetOrAdd(1, valueFactory))
        {
            return lifetime.Value;
        }
    }

    [Benchmark]
    public SomeDisposable ScopedConcurrentLruWrappedFuncProtected()
    {
        // Factory produces the cached object directly (a Scoped wrapping the item).
        Func<int, Scoped<SomeDisposable>> valueFactory = k => new Scoped<SomeDisposable>(new SomeDisposable());
        using (var lifetime = scopedConcurrentLru.ScopedGetOrAddProtected(1, valueFactory))
        {
            return lifetime.Value;
        }
    }
}

// Minimal disposable stub: exists only so the benchmarks above have a
// disposable value type to cache. Dispose is intentionally a no-op.
public class SomeDisposable : IDisposable
{
    public void Dispose()
    {
    }
}
}
118 changes: 118 additions & 0 deletions BitFaster.Caching.UnitTests/DesiredApi.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using BitFaster.Caching.Lru;

namespace BitFaster.Caching.UnitTests
{
// Wrappers needed:
// - Atomic
// - Scoped (already exists)
// - ScopedAtomic
// - AsyncAtomic
// - ScopedAsyncAtomic
// There is no ScopedAsync, since that is just Scoped - the task is not stored so we only need scoped values in the cache.
// Design sketch: each method below illustrates the intended calling pattern for
// one wrapper combination. NOTE(review): this code exercises extension methods
// (GetOrAdd/TryUpdate/TryGetLifetime etc. on wrapped LRUs) that are presumably
// defined elsewhere in the PR — confirm these overloads exist before relying on
// the exact signatures shown here.
public class DesiredApi
{
// Demonstrates caching with Atomic<K, V>: the cache stores the wrapper,
// and the value is created once via GetValue.
public static void HowToCacheAtomic()
{
var lru = new ConcurrentLru<int, Atomic<int, int>>(4);

// raw, this is a bit of a mess: two factories, one for the wrapper and one for the value
var r = lru.GetOrAdd(1, i => new Atomic<int, int>()).GetValue(1, x => x);

// extension cleanup can hide it: caller works in terms of the unwrapped value
var rr = lru.GetOrAdd(1, i => i);

// unwrapping extensions for update/read paths
lru.TryUpdate(2, 3);
lru.TryGet(1, out int v);
lru.AddOrUpdate(1, 2);
}

// Demonstrates caching with Scoped<T>: the lifetime returned by ScopedGetOrAdd
// keeps the scope alive while the value is in use.
public static void HowToCacheScoped()
{
var lru = new ConcurrentLru<int, Scoped<SomeDisposable>>(4);

// this is not so clean, because the lambda has to input the scoped object
// if we wrap it, would need a closure inside the extension method. How bad is that?
using (var l = lru.ScopedGetOrAdd(1, x => new Scoped<SomeDisposable>(new SomeDisposable())))
{
var d = l.Value;
}
}

// Demonstrates ScopedAtomic<K, V>: lazy single-instance creation combined
// with scope-managed disposal.
public static void HowToCacheScopedAtomic()
{
// ICache<K, ScopedAtomic<K, V>>
var scopedAtomicLru = new ConcurrentLru<int, ScopedAtomic<int, SomeDisposable>>(5);

// GetOrAdd returns a lifetime; disposing it releases the scope reference
using (var l = scopedAtomicLru.GetOrAdd(1, k => new SomeDisposable()))
{
var d = l.Value;
}

scopedAtomicLru.TryUpdate(2, new SomeDisposable());

scopedAtomicLru.AddOrUpdate(1, new SomeDisposable());

// TODO: how to clean this up to 1 line?
if (scopedAtomicLru.TryGetLifetime(1, out var lifetime))
{
using (lifetime)
{
var x = lifetime.Value;
}
}
}

// Demonstrates AsyncAtomic<K, V>: the value factory is async end to end,
// and the task result (not the task) is what the wrapper exposes.
public async static Task HowToCacheAsyncAtomic()
{
var asyncAtomicLru = new ConcurrentLru<int, AsyncAtomic<int, int>>(5);

var ar = await asyncAtomicLru.GetOrAddAsync(1, i => Task.FromResult(i));

asyncAtomicLru.TryUpdate(2, 3);
asyncAtomicLru.TryGet(1, out int v);
asyncAtomicLru.AddOrUpdate(1, 2);
}

// Requirements:
// 1. lifetime/value create is async end to end (if async delegate is used to create value)
// 2. value is created lazily, guarantee single instance of object, single invocation of lazy
// 3. lazy value is disposed by scope
// 4. lifetime keeps scope alive

// Demonstrates ScopedAsyncAtomic<K, V>: async lazy creation plus
// scope-managed disposal, satisfying the four requirements above.
public static async Task HowToCacheScopedAsyncAtomic()
{
var scopedAsyncAtomicLru = new ConcurrentLru<int, ScopedAsyncAtomic<int, SomeDisposable>>(4);
Func<int, Task<SomeDisposable>> valueFactory = k => Task.FromResult(new SomeDisposable());

// awaiting yields a lifetime once the async factory has completed
using (var lifetime = await scopedAsyncAtomicLru.GetOrAddAsync(1, valueFactory))
{
var y = lifetime.Value;
}

scopedAsyncAtomicLru.TryUpdate(2, new SomeDisposable());

scopedAsyncAtomicLru.AddOrUpdate(1, new SomeDisposable());

// TODO: how to clean this up to 1 line?
if (scopedAsyncAtomicLru.TryGetLifetime(1, out var lifetime2))
{
using (lifetime2)
{
var x = lifetime2.Value;
}
}
}
}

// Minimal disposable stub used by the API sketches above.
// Dispose is intentionally a no-op.
public class SomeDisposable : IDisposable
{
    public void Dispose()
    {
    }
}
}
59 changes: 59 additions & 0 deletions BitFaster.Caching.UnitTests/Lazy/AsyncAtomicExtensionsTests.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using BitFaster.Caching.Lru;
using FluentAssertions;
using Xunit;

namespace BitFaster.Caching.UnitTests.Lazy
{
// Tests for the AsyncAtomic unwrapping extension methods on ConcurrentLru.
public class AsyncAtomicExtensionsTests
{
    // Fresh cache per test (xUnit constructs the class for each test case).
    private readonly ConcurrentLru<int, AsyncAtomic<int, int>> lru = new(2, 9, EqualityComparer<int>.Default);

    [Fact]
    public async Task GetOrAddAsync()
    {
        var ar = await lru.GetOrAddAsync(1, i => Task.FromResult(i));

        ar.Should().Be(1);

        // the created value must now be visible via the unwrapping TryGet
        // (previously these calls were made with no assertions, verifying nothing)
        lru.TryGet(1, out int v).Should().BeTrue();
        v.Should().Be(1);
    }

    [Fact]
    public void TryUpdateWhenKeyDoesNotExistReturnsFalse()
    {
        lru.TryUpdate(2, 3).Should().BeFalse();
    }

    [Fact]
    public void TryUpdateWhenKeyExistsUpdatesValue()
    {
        lru.AddOrUpdate(1, 2);

        lru.TryUpdate(1, 42).Should().BeTrue();

        lru.TryGet(1, out int v).Should().BeTrue();
        v.Should().Be(42);
    }

    [Fact]
    public void TryGetWhenKeyDoesNotExistReturnsFalse()
    {
        lru.TryGet(1, out _).Should().BeFalse();
    }

    [Fact]
    public void AddOrUpdateUpdatesValue()
    {
        lru.AddOrUpdate(1, 2);

        lru.TryGet(1, out int v).Should().BeTrue();
        v.Should().Be(2);
    }
}
}
Loading