The purpose of updatecache is to update the cache conveniently.
- prevent cache breakdown
- perform regular update of the cache
- limit the frequency of access to the upstream (single flight)
go get github.com/vearne/updatecache
use NewCache to create a local cache.
NewCache(waitUpdate bool, opts ...Option) *LocalCache
when the Get() is called
(c *LocalCache) Get(key any) any
waitUpdate == true: if the value corresponding to a key is being updated, wait for the update function to complete and obtain the latest value.
waitUpdate == false: if the value corresponding to a key is being updated, just use the previous value.
WithRateLimit creates a Limiter to limit the frequency of fetching values from the upstream.
WithRateLimit(r float64, b int) Option
c := cache.NewCache(true, cache.WithRateLimit(200, 1))
optional value: debug | info | warn | error
export SIMPLE_LOG_LEVEL=debug
go test .
go test -v .
go test -run TestFirstLoad ./
package main
import (
cache "github.com/vearne/updatecache"
"log"
"sync/atomic"
"time"
)
func calcuDuration(value any) time.Duration {
return time.Second
}
// main demonstrates the updatecache workflow:
//  1. create a cache whose readers wait for in-flight updates (waitUpdate=true),
//     rate-limited to 200 upstream fetches/sec with a burst of 1;
//  2. seed the key with FirstLoad and schedule periodic refreshes via
//     DynamicUpdateLater (interval chosen by calcuDuration);
//  3. poll the key every 500ms and print whatever the cache currently holds.
func main() {
	const key = "aaa"

	lc := cache.NewCache(true, cache.WithRateLimit(200, 1))

	// Simulated backend sequence: each refresh yields 3, 4, 5, ...
	var backendCounter uint32 = 2

	if !lc.Contains(key) {
		// Seed the entry: placeholder value 1, a loader that takes ~1s,
		// and a 10s timeout for the initial load.
		loader := func() (any, error) {
			time.Sleep(time.Second)
			log.Println("FirstLoad-GetValueFunc")
			return 2, nil
		}
		firstValue := lc.FirstLoad(key, 1, loader, 10*time.Second)

		// Keep the entry fresh in the background.
		lc.DynamicUpdateLater(key, calcuDuration, func() (any, error) {
			// get value from backend(for example: MySQL, MongoDB or other application)
			log.Println("get value from backend...")
			return atomic.AddUint32(&backendCounter, 1), nil
		})

		log.Printf("result:%v \n", firstValue)
	}

	// Observe the cached value for ~15 seconds.
	for poll := 0; poll < 30; poll++ {
		time.Sleep(500 * time.Millisecond)
		log.Println(lc.Get(key))
	}
}