From 5a753feadf4467b6934ff9438785be55db7b55fb Mon Sep 17 00:00:00 2001 From: Tony Holdstock-Brown Date: Thu, 4 Jan 2024 18:39:36 -0800 Subject: [PATCH] Add ccache One day we'll look at modern LRU algorithms, such as S3LRU or Sieve. --- caching_parser.go | 27 +- expr_test.go | 8 +- go.mod | 1 + go.sum | 6 + parser_test.go | 2 +- .../karlseguin/ccache/v2/.gitignore | 1 + .../github.com/karlseguin/ccache/v2/Makefile | 5 + .../github.com/karlseguin/ccache/v2/bucket.go | 105 ++++++ .../github.com/karlseguin/ccache/v2/cache.go | 323 ++++++++++++++++++ .../karlseguin/ccache/v2/configuration.go | 103 ++++++ .../github.com/karlseguin/ccache/v2/item.go | 107 ++++++ .../karlseguin/ccache/v2/layeredbucket.go | 121 +++++++ .../karlseguin/ccache/v2/layeredcache.go | 302 ++++++++++++++++ .../karlseguin/ccache/v2/license.txt | 19 ++ .../github.com/karlseguin/ccache/v2/readme.md | 196 +++++++++++ .../karlseguin/ccache/v2/secondarycache.go | 72 ++++ vendor/modules.txt | 3 + 17 files changed, 1386 insertions(+), 15 deletions(-) create mode 100644 vendor/github.com/karlseguin/ccache/v2/.gitignore create mode 100644 vendor/github.com/karlseguin/ccache/v2/Makefile create mode 100644 vendor/github.com/karlseguin/ccache/v2/bucket.go create mode 100644 vendor/github.com/karlseguin/ccache/v2/cache.go create mode 100644 vendor/github.com/karlseguin/ccache/v2/configuration.go create mode 100644 vendor/github.com/karlseguin/ccache/v2/item.go create mode 100644 vendor/github.com/karlseguin/ccache/v2/layeredbucket.go create mode 100644 vendor/github.com/karlseguin/ccache/v2/layeredcache.go create mode 100644 vendor/github.com/karlseguin/ccache/v2/license.txt create mode 100644 vendor/github.com/karlseguin/ccache/v2/readme.md create mode 100644 vendor/github.com/karlseguin/ccache/v2/secondarycache.go diff --git a/caching_parser.go b/caching_parser.go index 5da59a1..3190f9f 100644 --- a/caching_parser.go +++ b/caching_parser.go @@ -1,30 +1,35 @@ package expr import ( - "sync" "sync/atomic" + 
"time" "github.com/google/cel-go/cel" - // "github.com/karlseguin/ccache/v2" + "github.com/karlseguin/ccache/v2" ) var ( + CacheTime = time.Hour + replace = []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t"} ) // NewCachingParser returns a CELParser which lifts quoted literals out of the expression // as variables and uses caching to cache expression parsing, resulting in improved // performance when parsing expressions. -func NewCachingParser(env *cel.Env) CELParser { +func NewCachingParser(env *cel.Env, cache *ccache.Cache) CELParser { + if cache == nil { + cache = ccache.New(ccache.Configure()) + } return &cachingParser{ - env: env, + cache: cache, + env: env, } } type cachingParser struct { // cache is a global cache of precompiled expressions. - // cache *ccache.Cache - stupidNoInternetCache sync.Map + cache *ccache.Cache env *cel.Env @@ -35,20 +40,20 @@ type cachingParser struct { func (c *cachingParser) Parse(expr string) (*cel.Ast, *cel.Issues, LiftedArgs) { expr, vars := liftLiterals(expr) - // TODO: ccache, when I have internet. 
- if cached, ok := c.stupidNoInternetCache.Load(expr); ok { - p := cached.(ParsedCelExpr) + if cached := c.cache.Get(expr); cached != nil { + cached.Extend(CacheTime) + p := cached.Value().(ParsedCelExpr) atomic.AddInt64(&c.hits, 1) return p.AST, p.Issues, vars } ast, issues := c.env.Parse(expr) - c.stupidNoInternetCache.Store(expr, ParsedCelExpr{ + c.cache.Set(expr, ParsedCelExpr{ Expr: expr, AST: ast, Issues: issues, - }) + }, CacheTime) atomic.AddInt64(&c.misses, 1) return ast, issues, vars diff --git a/expr_test.go b/expr_test.go index 93a0594..7dcd03c 100644 --- a/expr_test.go +++ b/expr_test.go @@ -14,7 +14,9 @@ import ( "github.com/stretchr/testify/require" ) -func BenchmarkCachingEvaluate1_000(b *testing.B) { benchEval(1_000, NewCachingParser(newEnv()), b) } +func BenchmarkCachingEvaluate1_000(b *testing.B) { + benchEval(1_000, NewCachingParser(newEnv(), nil), b) +} // func BenchmarkNonCachingEvaluate1_000(b *testing.B) { benchEval(1_000, EnvParser(newEnv()), b) } @@ -71,7 +73,7 @@ func evaluate(b *testing.B, i int, parser TreeParser) error { func TestEvaluate(t *testing.T) { ctx := context.Background() - parser, err := NewTreeParser(NewCachingParser(newEnv())) + parser, err := NewTreeParser(NewCachingParser(newEnv(), nil)) require.NoError(t, err) e := NewAggregateEvaluator(parser, testBoolEvaluator) @@ -159,7 +161,7 @@ func TestEvaluate(t *testing.T) { func TestEvaluate_ArrayIndexes(t *testing.T) { ctx := context.Background() - parser, err := NewTreeParser(NewCachingParser(newEnv())) + parser, err := NewTreeParser(NewCachingParser(newEnv(), nil)) require.NoError(t, err) e := NewAggregateEvaluator(parser, testBoolEvaluator) diff --git a/go.mod b/go.mod index d0e8064..33be73f 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.21.0 require ( github.com/google/cel-go v0.18.2 + github.com/karlseguin/ccache/v2 v2.0.8 github.com/ohler55/ojg v1.21.0 github.com/plar/go-adaptive-radix-tree v1.0.5 github.com/stretchr/testify v1.8.4 diff --git a/go.sum b/go.sum 
index 1bbb41c..f8dc05c 100644 --- a/go.sum +++ b/go.sum @@ -9,6 +9,10 @@ github.com/google/cel-go v0.18.2/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/karlseguin/ccache/v2 v2.0.8 h1:lT38cE//uyf6KcFok0rlgXtGFBWxkI6h/qg4tbFyDnA= +github.com/karlseguin/ccache/v2 v2.0.8/go.mod h1:2BDThcfQMf/c0jnZowt16eW405XIqZPavt+HoYEtcxQ= +github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA= +github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8= github.com/ohler55/ojg v1.21.0 h1:niqSS6yl3PQZJrqh7pKs/zinl4HebGe8urXEfpvlpYY= github.com/ohler55/ojg v1.21.0/go.mod h1:gQhDVpQLqrmnd2eqGAvJtn+NfKoYJbe/A4Sj3/Vro4o= github.com/plar/go-adaptive-radix-tree v1.0.5 h1:rHR89qy/6c24TBAHullFMrJsU9hGlKmPibdBGU6/gbM= @@ -26,6 +30,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ= +github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM= golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU= golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= diff --git a/parser_test.go 
b/parser_test.go index e8be265..6dcb711 100644 --- a/parser_test.go +++ b/parser_test.go @@ -979,7 +979,7 @@ func TestParse(t *testing.T) { func TestParse_LiftedVars(t *testing.T) { ctx := context.Background() - cachingCelParser := NewCachingParser(newEnv()) + cachingCelParser := NewCachingParser(newEnv(), nil) assert := func(t *testing.T, tests []parseTestInput) { t.Helper() diff --git a/vendor/github.com/karlseguin/ccache/v2/.gitignore b/vendor/github.com/karlseguin/ccache/v2/.gitignore new file mode 100644 index 0000000..48b8bf9 --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/v2/.gitignore @@ -0,0 +1 @@ +vendor/ diff --git a/vendor/github.com/karlseguin/ccache/v2/Makefile b/vendor/github.com/karlseguin/ccache/v2/Makefile new file mode 100644 index 0000000..5b3f26b --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/v2/Makefile @@ -0,0 +1,5 @@ +t: + go test ./... + +f: + go fmt ./... diff --git a/vendor/github.com/karlseguin/ccache/v2/bucket.go b/vendor/github.com/karlseguin/ccache/v2/bucket.go new file mode 100644 index 0000000..e7e2ab0 --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/v2/bucket.go @@ -0,0 +1,105 @@ +package ccache + +import ( + "strings" + "sync" + "time" +) + +type bucket struct { + sync.RWMutex + lookup map[string]*Item +} + +func (b *bucket) itemCount() int { + b.RLock() + defer b.RUnlock() + return len(b.lookup) +} + +func (b *bucket) forEachFunc(matches func(key string, item *Item) bool) bool { + lookup := b.lookup + b.RLock() + defer b.RUnlock() + for key, item := range lookup { + if !matches(key, item) { + return false + } + } + return true +} + +func (b *bucket) get(key string) *Item { + b.RLock() + defer b.RUnlock() + return b.lookup[key] +} + +func (b *bucket) set(key string, value interface{}, duration time.Duration, track bool) (*Item, *Item) { + expires := time.Now().Add(duration).UnixNano() + item := newItem(key, value, expires, track) + b.Lock() + existing := b.lookup[key] + b.lookup[key] = item + b.Unlock() + 
return item, existing +} + +func (b *bucket) delete(key string) *Item { + b.Lock() + item := b.lookup[key] + delete(b.lookup, key) + b.Unlock() + return item +} + +// This is an expensive operation, so we do what we can to optimize it and limit +// the impact it has on concurrent operations. Specifically, we: +// 1 - Do an initial iteration to collect matches. This allows us to do the +// "expensive" prefix check (on all values) using only a read-lock +// 2 - Do a second iteration, under write lock, for the matched results to do +// the actual deletion + +// Also, this is the only place where the Bucket is aware of cache detail: the +// deletables channel. Passing it here lets us avoid iterating over matched items +// again in the cache. Further, we pass item to deletables BEFORE actually removing +// the item from the map. I'm pretty sure this is 100% fine, but it is unique. +// (We do this so that the write to the channel is under the read lock and not the +// write lock) +func (b *bucket) deleteFunc(matches func(key string, item *Item) bool, deletables chan *Item) int { + lookup := b.lookup + items := make([]*Item, 0) + + b.RLock() + for key, item := range lookup { + if matches(key, item) { + deletables <- item + items = append(items, item) + } + } + b.RUnlock() + + if len(items) == 0 { + // avoid the write lock if we can + return 0 + } + + b.Lock() + for _, item := range items { + delete(lookup, item.key) + } + b.Unlock() + return len(items) +} + +func (b *bucket) deletePrefix(prefix string, deletables chan *Item) int { + return b.deleteFunc(func(key string, item *Item) bool { + return strings.HasPrefix(key, prefix) + }, deletables) +} + +func (b *bucket) clear() { + b.Lock() + b.lookup = make(map[string]*Item) + b.Unlock() +} diff --git a/vendor/github.com/karlseguin/ccache/v2/cache.go b/vendor/github.com/karlseguin/ccache/v2/cache.go new file mode 100644 index 0000000..0f87c92 --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/v2/cache.go @@ -0,0 +1,323 
@@ +// An LRU cached aimed at high concurrency +package ccache + +import ( + "container/list" + "hash/fnv" + "sync/atomic" + "time" +) + +// The cache has a generic 'control' channel that is used to send +// messages to the worker. These are the messages that can be sent to it +type getDropped struct { + res chan int +} +type setMaxSize struct { + size int64 +} + +type clear struct { + done chan struct{} +} + +type Cache struct { + *Configuration + list *list.List + size int64 + buckets []*bucket + bucketMask uint32 + deletables chan *Item + promotables chan *Item + control chan interface{} +} + +// Create a new cache with the specified configuration +// See ccache.Configure() for creating a configuration +func New(config *Configuration) *Cache { + c := &Cache{ + list: list.New(), + Configuration: config, + bucketMask: uint32(config.buckets) - 1, + buckets: make([]*bucket, config.buckets), + control: make(chan interface{}), + } + for i := 0; i < config.buckets; i++ { + c.buckets[i] = &bucket{ + lookup: make(map[string]*Item), + } + } + c.restart() + return c +} + +func (c *Cache) ItemCount() int { + count := 0 + for _, b := range c.buckets { + count += b.itemCount() + } + return count +} + +func (c *Cache) DeletePrefix(prefix string) int { + count := 0 + for _, b := range c.buckets { + count += b.deletePrefix(prefix, c.deletables) + } + return count +} + +// Deletes all items that the matches func evaluates to true. +func (c *Cache) DeleteFunc(matches func(key string, item *Item) bool) int { + count := 0 + for _, b := range c.buckets { + count += b.deleteFunc(matches, c.deletables) + } + return count +} + +func (c *Cache) ForEachFunc(matches func(key string, item *Item) bool) { + for _, b := range c.buckets { + if !b.forEachFunc(matches) { + break + } + } +} + +// Get an item from the cache. Returns nil if the item wasn't found. +// This can return an expired item. 
Use item.Expired() to see if the item +// is expired and item.TTL() to see how long until the item expires (which +// will be negative for an already expired item). +func (c *Cache) Get(key string) *Item { + item := c.bucket(key).get(key) + if item == nil { + return nil + } + if !item.Expired() { + c.promote(item) + } + return item +} + +// Used when the cache was created with the Track() configuration option. +// Avoid otherwise +func (c *Cache) TrackingGet(key string) TrackedItem { + item := c.Get(key) + if item == nil { + return NilTracked + } + item.track() + return item +} + +// Used when the cache was created with the Track() configuration option. +// Sets the item, and returns a tracked reference to it. +func (c *Cache) TrackingSet(key string, value interface{}, duration time.Duration) TrackedItem { + return c.set(key, value, duration, true) +} + +// Set the value in the cache for the specified duration +func (c *Cache) Set(key string, value interface{}, duration time.Duration) { + c.set(key, value, duration, false) +} + +// Replace the value if it exists, does not set if it doesn't. +// Returns true if the item existed an was replaced, false otherwise. +// Replace does not reset item's TTL +func (c *Cache) Replace(key string, value interface{}) bool { + item := c.bucket(key).get(key) + if item == nil { + return false + } + c.Set(key, value, item.TTL()) + return true +} + +// Attempts to get the value from the cache and calles fetch on a miss (missing +// or stale item). If fetch returns an error, no value is cached and the error +// is returned back to the caller. +func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) { + item := c.Get(key) + if item != nil && !item.Expired() { + return item, nil + } + value, err := fetch() + if err != nil { + return nil, err + } + return c.set(key, value, duration, false), nil +} + +// Remove the item from the cache, return true if the item was present, false otherwise. 
+func (c *Cache) Delete(key string) bool { + item := c.bucket(key).delete(key) + if item != nil { + c.deletables <- item + return true + } + return false +} + +// Clears the cache +func (c *Cache) Clear() { + done := make(chan struct{}) + c.control <- clear{done: done} + <-done +} + +// Stops the background worker. Operations performed on the cache after Stop +// is called are likely to panic +func (c *Cache) Stop() { + close(c.promotables) + <-c.control +} + +// Gets the number of items removed from the cache due to memory pressure since +// the last time GetDropped was called +func (c *Cache) GetDropped() int { + res := make(chan int) + c.control <- getDropped{res: res} + return <-res +} + +// Sets a new max size. That can result in a GC being run if the new maxium size +// is smaller than the cached size +func (c *Cache) SetMaxSize(size int64) { + c.control <- setMaxSize{size} +} + +func (c *Cache) restart() { + c.deletables = make(chan *Item, c.deleteBuffer) + c.promotables = make(chan *Item, c.promoteBuffer) + c.control = make(chan interface{}) + go c.worker() +} + +func (c *Cache) deleteItem(bucket *bucket, item *Item) { + bucket.delete(item.key) //stop other GETs from getting it + c.deletables <- item +} + +func (c *Cache) set(key string, value interface{}, duration time.Duration, track bool) *Item { + item, existing := c.bucket(key).set(key, value, duration, track) + if existing != nil { + c.deletables <- existing + } + c.promote(item) + return item +} + +func (c *Cache) bucket(key string) *bucket { + h := fnv.New32a() + h.Write([]byte(key)) + return c.buckets[h.Sum32()&c.bucketMask] +} + +func (c *Cache) promote(item *Item) { + select { + case c.promotables <- item: + default: + } + +} + +func (c *Cache) worker() { + defer close(c.control) + dropped := 0 + for { + select { + case item, ok := <-c.promotables: + if ok == false { + goto drain + } + if c.doPromote(item) && c.size > c.maxSize { + dropped += c.gc() + } + case item := <-c.deletables: + 
c.doDelete(item) + case control := <-c.control: + switch msg := control.(type) { + case getDropped: + msg.res <- dropped + dropped = 0 + case setMaxSize: + c.maxSize = msg.size + if c.size > c.maxSize { + dropped += c.gc() + } + case clear: + for _, bucket := range c.buckets { + bucket.clear() + } + c.size = 0 + c.list = list.New() + msg.done <- struct{}{} + } + } + } + +drain: + for { + select { + case item := <-c.deletables: + c.doDelete(item) + default: + close(c.deletables) + return + } + } +} + +func (c *Cache) doDelete(item *Item) { + if item.element == nil { + item.promotions = -2 + } else { + c.size -= item.size + if c.onDelete != nil { + c.onDelete(item) + } + c.list.Remove(item.element) + } +} + +func (c *Cache) doPromote(item *Item) bool { + //already deleted + if item.promotions == -2 { + return false + } + if item.element != nil { //not a new item + if item.shouldPromote(c.getsPerPromote) { + c.list.MoveToFront(item.element) + item.promotions = 0 + } + return false + } + + c.size += item.size + item.element = c.list.PushFront(item) + return true +} + +func (c *Cache) gc() int { + dropped := 0 + element := c.list.Back() + for i := 0; i < c.itemsToPrune; i++ { + if element == nil { + return dropped + } + prev := element.Prev() + item := element.Value.(*Item) + if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 { + c.bucket(item.key).delete(item.key) + c.size -= item.size + c.list.Remove(element) + if c.onDelete != nil { + c.onDelete(item) + } + dropped += 1 + item.promotions = -2 + } + element = prev + } + return dropped +} diff --git a/vendor/github.com/karlseguin/ccache/v2/configuration.go b/vendor/github.com/karlseguin/ccache/v2/configuration.go new file mode 100644 index 0000000..d618215 --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/v2/configuration.go @@ -0,0 +1,103 @@ +package ccache + +type Configuration struct { + maxSize int64 + buckets int + itemsToPrune int + deleteBuffer int + promoteBuffer int + getsPerPromote int32 + 
tracking bool + onDelete func(item *Item) +} + +// Creates a configuration object with sensible defaults +// Use this as the start of the fluent configuration: +// e.g.: ccache.New(ccache.Configure().MaxSize(10000)) +func Configure() *Configuration { + return &Configuration{ + buckets: 16, + itemsToPrune: 500, + deleteBuffer: 1024, + getsPerPromote: 3, + promoteBuffer: 1024, + maxSize: 5000, + tracking: false, + } +} + +// The max size for the cache +// [5000] +func (c *Configuration) MaxSize(max int64) *Configuration { + c.maxSize = max + return c +} + +// Keys are hashed into % bucket count to provide greater concurrency (every set +// requires a write lock on the bucket). Must be a power of 2 (1, 2, 4, 8, 16, ...) +// [16] +func (c *Configuration) Buckets(count uint32) *Configuration { + if count == 0 || ((count&(^count+1)) == count) == false { + count = 16 + } + c.buckets = int(count) + return c +} + +// The number of items to prune when memory is low +// [500] +func (c *Configuration) ItemsToPrune(count uint32) *Configuration { + c.itemsToPrune = int(count) + return c +} + +// The size of the queue for items which should be promoted. If the queue fills +// up, promotions are skipped +// [1024] +func (c *Configuration) PromoteBuffer(size uint32) *Configuration { + c.promoteBuffer = int(size) + return c +} + +// The size of the queue for items which should be deleted. If the queue fills +// up, calls to Delete() will block +func (c *Configuration) DeleteBuffer(size uint32) *Configuration { + c.deleteBuffer = int(size) + return c +} + +// Give a large cache with a high read / write ratio, it's usually unnecessary +// to promote an item on every Get. GetsPerPromote specifies the number of Gets +// a key must have before being promoted +// [3] +func (c *Configuration) GetsPerPromote(count int32) *Configuration { + c.getsPerPromote = count + return c +} + +// Typically, a cache is agnostic about how cached values are use. 
This is fine +// for a typical cache usage, where you fetch an item from the cache, do something +// (write it out) and nothing else. + +// However, if callers are going to keep a reference to a cached item for a long +// time, things get messy. Specifically, the cache can evict the item, while +// references still exist. Technically, this isn't an issue. However, if you reload +// the item back into the cache, you end up with 2 objects representing the same +// data. This is a waste of space and could lead to weird behavior (the type an +// identity map is meant to solve). + +// By turning tracking on and using the cache's TrackingGet, the cache +// won't evict items which you haven't called Release() on. It's a simple reference +// counter. +func (c *Configuration) Track() *Configuration { + c.tracking = true + return c +} + +// OnDelete allows setting a callback function to react to ideam deletion. +// This typically allows to do a cleanup of resources, such as calling a Close() on +// cached object that require some kind of tear-down. 
+func (c *Configuration) OnDelete(callback func(item *Item)) *Configuration { + c.onDelete = callback + return c +} diff --git a/vendor/github.com/karlseguin/ccache/v2/item.go b/vendor/github.com/karlseguin/ccache/v2/item.go new file mode 100644 index 0000000..0226129 --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/v2/item.go @@ -0,0 +1,107 @@ +package ccache + +import ( + "container/list" + "sync/atomic" + "time" +) + +type Sized interface { + Size() int64 +} + +type TrackedItem interface { + Value() interface{} + Release() + Expired() bool + TTL() time.Duration + Expires() time.Time + Extend(duration time.Duration) +} + +type nilItem struct{} + +func (n *nilItem) Value() interface{} { return nil } +func (n *nilItem) Release() {} + +func (i *nilItem) Expired() bool { + return true +} + +func (i *nilItem) TTL() time.Duration { + return time.Minute +} + +func (i *nilItem) Expires() time.Time { + return time.Time{} +} + +func (i *nilItem) Extend(duration time.Duration) { +} + +var NilTracked = new(nilItem) + +type Item struct { + key string + group string + promotions int32 + refCount int32 + expires int64 + size int64 + value interface{} + element *list.Element +} + +func newItem(key string, value interface{}, expires int64, track bool) *Item { + size := int64(1) + if sized, ok := value.(Sized); ok { + size = sized.Size() + } + item := &Item{ + key: key, + value: value, + promotions: 0, + size: size, + expires: expires, + } + if track { + item.refCount = 1 + } + return item +} + +func (i *Item) shouldPromote(getsPerPromote int32) bool { + i.promotions += 1 + return i.promotions == getsPerPromote +} + +func (i *Item) Value() interface{} { + return i.value +} + +func (i *Item) track() { + atomic.AddInt32(&i.refCount, 1) +} + +func (i *Item) Release() { + atomic.AddInt32(&i.refCount, -1) +} + +func (i *Item) Expired() bool { + expires := atomic.LoadInt64(&i.expires) + return expires < time.Now().UnixNano() +} + +func (i *Item) TTL() time.Duration { + expires := 
atomic.LoadInt64(&i.expires) + return time.Nanosecond * time.Duration(expires-time.Now().UnixNano()) +} + +func (i *Item) Expires() time.Time { + expires := atomic.LoadInt64(&i.expires) + return time.Unix(0, expires) +} + +func (i *Item) Extend(duration time.Duration) { + atomic.StoreInt64(&i.expires, time.Now().Add(duration).UnixNano()) +} diff --git a/vendor/github.com/karlseguin/ccache/v2/layeredbucket.go b/vendor/github.com/karlseguin/ccache/v2/layeredbucket.go new file mode 100644 index 0000000..46e704d --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/v2/layeredbucket.go @@ -0,0 +1,121 @@ +package ccache + +import ( + "sync" + "time" +) + +type layeredBucket struct { + sync.RWMutex + buckets map[string]*bucket +} + +func (b *layeredBucket) itemCount() int { + count := 0 + b.RLock() + defer b.RUnlock() + for _, b := range b.buckets { + count += b.itemCount() + } + return count +} + +func (b *layeredBucket) get(primary, secondary string) *Item { + bucket := b.getSecondaryBucket(primary) + if bucket == nil { + return nil + } + return bucket.get(secondary) +} + +func (b *layeredBucket) getSecondaryBucket(primary string) *bucket { + b.RLock() + bucket, exists := b.buckets[primary] + b.RUnlock() + if exists == false { + return nil + } + return bucket +} + +func (b *layeredBucket) set(primary, secondary string, value interface{}, duration time.Duration, track bool) (*Item, *Item) { + b.Lock() + bkt, exists := b.buckets[primary] + if exists == false { + bkt = &bucket{lookup: make(map[string]*Item)} + b.buckets[primary] = bkt + } + b.Unlock() + item, existing := bkt.set(secondary, value, duration, track) + item.group = primary + return item, existing +} + +func (b *layeredBucket) delete(primary, secondary string) *Item { + b.RLock() + bucket, exists := b.buckets[primary] + b.RUnlock() + if exists == false { + return nil + } + return bucket.delete(secondary) +} + +func (b *layeredBucket) deletePrefix(primary, prefix string, deletables chan *Item) int { + 
b.RLock() + bucket, exists := b.buckets[primary] + b.RUnlock() + if exists == false { + return 0 + } + return bucket.deletePrefix(prefix, deletables) +} + +func (b *layeredBucket) deleteFunc(primary string, matches func(key string, item *Item) bool, deletables chan *Item) int { + b.RLock() + bucket, exists := b.buckets[primary] + b.RUnlock() + if exists == false { + return 0 + } + return bucket.deleteFunc(matches, deletables) +} + +func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool { + b.RLock() + bucket, exists := b.buckets[primary] + b.RUnlock() + if exists == false { + return false + } + + bucket.Lock() + defer bucket.Unlock() + + if l := len(bucket.lookup); l == 0 { + return false + } + for key, item := range bucket.lookup { + delete(bucket.lookup, key) + deletables <- item + } + return true +} + +func (b *layeredBucket) forEachFunc(primary string, matches func(key string, item *Item) bool) { + b.RLock() + bucket, exists := b.buckets[primary] + b.RUnlock() + if exists { + bucket.forEachFunc(matches) + } +} + +func (b *layeredBucket) clear() { + b.Lock() + defer b.Unlock() + for _, bucket := range b.buckets { + bucket.clear() + } + b.buckets = make(map[string]*bucket) +} diff --git a/vendor/github.com/karlseguin/ccache/v2/layeredcache.go b/vendor/github.com/karlseguin/ccache/v2/layeredcache.go new file mode 100644 index 0000000..3ffaf0d --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/v2/layeredcache.go @@ -0,0 +1,302 @@ +// An LRU cached aimed at high concurrency +package ccache + +import ( + "container/list" + "hash/fnv" + "sync/atomic" + "time" +) + +type LayeredCache struct { + *Configuration + list *list.List + buckets []*layeredBucket + bucketMask uint32 + size int64 + deletables chan *Item + promotables chan *Item + control chan interface{} +} + +// Create a new layered cache with the specified configuration. +// A layered cache used a two keys to identify a value: a primary key +// and a secondary key. 
Get, Set and Delete require both a primary and +// secondary key. However, DeleteAll requires only a primary key, deleting +// all values that share the same primary key. + +// Layered Cache is useful as an HTTP cache, where an HTTP purge might +// delete multiple variants of the same resource: +// primary key = "user/44" +// secondary key 1 = ".json" +// secondary key 2 = ".xml" + +// See ccache.Configure() for creating a configuration +func Layered(config *Configuration) *LayeredCache { + c := &LayeredCache{ + list: list.New(), + Configuration: config, + bucketMask: uint32(config.buckets) - 1, + buckets: make([]*layeredBucket, config.buckets), + deletables: make(chan *Item, config.deleteBuffer), + control: make(chan interface{}), + } + for i := 0; i < int(config.buckets); i++ { + c.buckets[i] = &layeredBucket{ + buckets: make(map[string]*bucket), + } + } + c.restart() + return c +} + +func (c *LayeredCache) ItemCount() int { + count := 0 + for _, b := range c.buckets { + count += b.itemCount() + } + return count +} + +// Get an item from the cache. Returns nil if the item wasn't found. +// This can return an expired item. Use item.Expired() to see if the item +// is expired and item.TTL() to see how long until the item expires (which +// will be negative for an already expired item). +func (c *LayeredCache) Get(primary, secondary string) *Item { + item := c.bucket(primary).get(primary, secondary) + if item == nil { + return nil + } + if item.expires > time.Now().UnixNano() { + c.promote(item) + } + return item +} + +func (c *LayeredCache) ForEachFunc(primary string, matches func(key string, item *Item) bool) { + c.bucket(primary).forEachFunc(primary, matches) +} + +// Get the secondary cache for a given primary key. This operation will +// never return nil. In the case where the primary key does not exist, a +// new, underlying, empty bucket will be created and returned. 
+func (c *LayeredCache) GetOrCreateSecondaryCache(primary string) *SecondaryCache { + primaryBkt := c.bucket(primary) + bkt := primaryBkt.getSecondaryBucket(primary) + primaryBkt.Lock() + if bkt == nil { + bkt = &bucket{lookup: make(map[string]*Item)} + primaryBkt.buckets[primary] = bkt + } + primaryBkt.Unlock() + return &SecondaryCache{ + bucket: bkt, + pCache: c, + } +} + +// Used when the cache was created with the Track() configuration option. +// Avoid otherwise +func (c *LayeredCache) TrackingGet(primary, secondary string) TrackedItem { + item := c.Get(primary, secondary) + if item == nil { + return NilTracked + } + item.track() + return item +} + +// Set the value in the cache for the specified duration +func (c *LayeredCache) TrackingSet(primary, secondary string, value interface{}, duration time.Duration) TrackedItem { + return c.set(primary, secondary, value, duration, true) +} + +// Set the value in the cache for the specified duration +func (c *LayeredCache) Set(primary, secondary string, value interface{}, duration time.Duration) { + c.set(primary, secondary, value, duration, false) +} + +// Replace the value if it exists, does not set if it doesn't. +// Returns true if the item existed an was replaced, false otherwise. +// Replace does not reset item's TTL nor does it alter its position in the LRU +func (c *LayeredCache) Replace(primary, secondary string, value interface{}) bool { + item := c.bucket(primary).get(primary, secondary) + if item == nil { + return false + } + c.Set(primary, secondary, value, item.TTL()) + return true +} + +// Attempts to get the value from the cache and calles fetch on a miss. +// If fetch returns an error, no value is cached and the error is returned back +// to the caller. 
+func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) { + item := c.Get(primary, secondary) + if item != nil { + return item, nil + } + value, err := fetch() + if err != nil { + return nil, err + } + return c.set(primary, secondary, value, duration, false), nil +} + +// Remove the item from the cache, return true if the item was present, false otherwise. +func (c *LayeredCache) Delete(primary, secondary string) bool { + item := c.bucket(primary).delete(primary, secondary) + if item != nil { + c.deletables <- item + return true + } + return false +} + +// Deletes all items that share the same primary key +func (c *LayeredCache) DeleteAll(primary string) bool { + return c.bucket(primary).deleteAll(primary, c.deletables) +} + +// Deletes all items that share the same primary key and prefix. +func (c *LayeredCache) DeletePrefix(primary, prefix string) int { + return c.bucket(primary).deletePrefix(primary, prefix, c.deletables) +} + +// Deletes all items that share the same primary key and where the matches func evaluates to true. +func (c *LayeredCache) DeleteFunc(primary string, matches func(key string, item *Item) bool) int { + return c.bucket(primary).deleteFunc(primary, matches, c.deletables) +} + +// Clears the cache +func (c *LayeredCache) Clear() { + done := make(chan struct{}) + c.control <- clear{done: done} + <-done +} + +func (c *LayeredCache) Stop() { + close(c.promotables) + <-c.control +} + +// Gets the number of items removed from the cache due to memory pressure since +// the last time GetDropped was called +func (c *LayeredCache) GetDropped() int { + res := make(chan int) + c.control <- getDropped{res: res} + return <-res +} + +// Sets a new max size. 
That can result in a GC being run if the new maxium size +// is smaller than the cached size +func (c *LayeredCache) SetMaxSize(size int64) { + c.control <- setMaxSize{size} +} + +func (c *LayeredCache) restart() { + c.promotables = make(chan *Item, c.promoteBuffer) + c.control = make(chan interface{}) + go c.worker() +} + +func (c *LayeredCache) set(primary, secondary string, value interface{}, duration time.Duration, track bool) *Item { + item, existing := c.bucket(primary).set(primary, secondary, value, duration, track) + if existing != nil { + c.deletables <- existing + } + c.promote(item) + return item +} + +func (c *LayeredCache) bucket(key string) *layeredBucket { + h := fnv.New32a() + h.Write([]byte(key)) + return c.buckets[h.Sum32()&c.bucketMask] +} + +func (c *LayeredCache) promote(item *Item) { + c.promotables <- item +} + +func (c *LayeredCache) worker() { + defer close(c.control) + dropped := 0 + for { + select { + case item, ok := <-c.promotables: + if ok == false { + return + } + if c.doPromote(item) && c.size > c.maxSize { + dropped += c.gc() + } + case item := <-c.deletables: + if item.element == nil { + atomic.StoreInt32(&item.promotions, -2) + } else { + c.size -= item.size + if c.onDelete != nil { + c.onDelete(item) + } + c.list.Remove(item.element) + } + case control := <-c.control: + switch msg := control.(type) { + case getDropped: + msg.res <- dropped + dropped = 0 + case setMaxSize: + c.maxSize = msg.size + if c.size > c.maxSize { + dropped += c.gc() + } + case clear: + for _, bucket := range c.buckets { + bucket.clear() + } + c.size = 0 + c.list = list.New() + msg.done <- struct{}{} + } + } + } +} + +func (c *LayeredCache) doPromote(item *Item) bool { + // deleted before it ever got promoted + if atomic.LoadInt32(&item.promotions) == -2 { + return false + } + if item.element != nil { //not a new item + if item.shouldPromote(c.getsPerPromote) { + c.list.MoveToFront(item.element) + atomic.StoreInt32(&item.promotions, 0) + } + return false + 
} + c.size += item.size + item.element = c.list.PushFront(item) + return true +} + +func (c *LayeredCache) gc() int { + element := c.list.Back() + dropped := 0 + for i := 0; i < c.itemsToPrune; i++ { + if element == nil { + return dropped + } + prev := element.Prev() + item := element.Value.(*Item) + if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 { + c.bucket(item.group).delete(item.group, item.key) + c.size -= item.size + c.list.Remove(element) + item.promotions = -2 + dropped += 1 + } + element = prev + } + return dropped +} diff --git a/vendor/github.com/karlseguin/ccache/v2/license.txt b/vendor/github.com/karlseguin/ccache/v2/license.txt new file mode 100644 index 0000000..aebeebf --- /dev/null +++ b/vendor/github.com/karlseguin/ccache/v2/license.txt @@ -0,0 +1,19 @@ +Copyright (c) 2013 Karl Seguin. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/karlseguin/ccache/v2/readme.md b/vendor/github.com/karlseguin/ccache/v2/readme.md
new file mode 100644
index 0000000..8617f18
--- /dev/null
+++ b/vendor/github.com/karlseguin/ccache/v2/readme.md
@@ -0,0 +1,196 @@
+# CCache
+CCache is an LRU Cache, written in Go, focused on supporting high concurrency.
+
+Lock contention on the list is reduced by:
+
+* Introducing a window which limits the frequency that an item can get promoted
+* Using a buffered channel to queue promotions for a single worker
+* Garbage collecting within the same thread as the worker
+
+Unless otherwise stated, all methods are thread-safe.
+
+## Setup
+
+First, download the project:
+
+```go
+    go get github.com/karlseguin/ccache/v2
+```
+
+## Configuration
+Next, import and create a `Cache` instance:
+
+
+```go
+import (
+  "github.com/karlseguin/ccache/v2"
+)
+
+var cache = ccache.New(ccache.Configure())
+```
+
+`Configure` exposes a chainable API:
+
+```go
+var cache = ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100))
+```
+
+The most likely configuration options to tweak are:
+
+* `MaxSize(int)` - the maximum size to store in the cache (default: 5000)
+* `GetsPerPromote(int)` - the number of times an item is fetched before we promote it. For large caches with long TTLs, it normally isn't necessary to promote an item after every fetch (default: 3)
+* `ItemsToPrune(int)` - the number of items to prune when we hit `MaxSize`. Freeing up more than 1 slot at a time improves performance (default: 500)
+
+Configurations that change the internals of the cache, which aren't as likely to need tweaking:
+
+* `Buckets` - ccache shards its internal map to provide a greater amount of concurrency. Must be a power of 2 (default: 16).
+* `PromoteBuffer(int)` - the size of the buffer to use to queue promotions (default: 1024)
+* `DeleteBuffer(int)` - the size of the buffer to use to queue deletions (default: 1024)
+
+## Usage
+
+Once the cache is setup, you can  `Get`, `Set` and `Delete` items from it. A `Get` returns an `*Item`:
+
+### Get
+```go
+item := cache.Get("user:4")
+if item == nil {
+	//handle
+} else {
+	user := item.Value().(*User)
+}
+```
+The returned `*Item` exposes a number of methods:
+
+* `Value() interface{}` - the value cached
+* `Expired() bool` - whether the item is expired or not
+* `TTL() time.Duration` - the duration before the item expires (will be a negative value for expired items)
+* `Expires() time.Time` - the time the item will expire
+
+By returning expired items, CCache lets you decide if you want to serve stale content or not. For example, you might decide to serve up slightly stale content (< 30 seconds old) while re-fetching newer data in the background. You might also decide to serve up infinitely stale content if you're unable to get new data from your source.
+
+### Set
+`Set` expects the key, value and ttl:
+
+```go
+cache.Set("user:4", user, time.Minute * 10)
+```
+
+### Fetch
+There's also a `Fetch` which mixes a `Get` and a `Set`:
+
+```go
+item, err := cache.Fetch("user:4", time.Minute * 10, func() (interface{}, error) {
+	//code to fetch the data in case of a miss
+	//should return the data to cache and the error, if any
+})
+```
+
+### Delete
+`Delete` expects the key to delete. It's ok to call `Delete` on a non-existent key:
+
+```go
+cache.Delete("user:4")
+```
+
+### DeletePrefix
+`DeletePrefix` deletes all keys matching the provided prefix. Returns the number of keys removed.
+
+### DeleteFunc
+`DeleteFunc` deletes all items that the provided matches func evaluates to true. Returns the number of keys removed.
+
+### ForEachFunc
+`ForEachFunc` iterates through all keys and values in the map and passes them to the provided function. 
Iteration stops if the function returns false. Iteration order is random.
+
+### Clear
+`Clear` clears the cache. If the cache's gc is running, `Clear` waits for it to finish.
+
+### Extend
+The life of an item can be changed via the `Extend` method. This will change the expiry of the item by the specified duration relative to the current time.
+
+### Replace
+The value of an item can be updated to a new value without renewing the item's TTL or its position in the LRU:
+
+```go
+cache.Replace("user:4", user)
+```
+
+`Replace` returns true if the item existed (and thus was replaced). In the case where the key was not in the cache, the value *is not* inserted and false is returned.
+
+### GetDropped
+You can get the number of keys evicted due to memory pressure by calling `GetDropped`:
+
+```go
+dropped := cache.GetDropped()
+```
+The counter is reset on every call. If the cache's gc is running, `GetDropped` waits for it to finish; it's meant to be called asynchronously for statistics/monitoring purposes.
+
+### Stop
+The cache's background worker can be stopped by calling `Stop`. Once `Stop` is called
+the cache should not be used (calls are likely to panic). Stop must be called in order to allow the garbage collector to reap the cache.
+
+## Tracking
+CCache supports a special tracking mode which is meant to be used in conjunction with other pieces of your code that maintains a long-lived reference to data.
+
+When you configure your cache with `Track()`:
+
+```go
+cache = ccache.New(ccache.Configure().Track())
+```
+
+The items retrieved via `TrackingGet` will not be eligible for purge until `Release` is called on them:
+
+```go
+item := cache.TrackingGet("user:4")
+user := item.Value() //will be nil if "user:4" didn't exist in the cache
+item.Release() //can be called even if item.Value() returned nil
+```
+
+In practice, `Release` wouldn't be called until later, at some other place in your code. `TrackingSet` can be used to set a value to be tracked.
+
+There are a couple of reasons to use the tracking mode if other parts of your code also hold references to objects. First, if you're already going to hold a reference to these objects, there's really no reason not to have them in the cache - the memory is used up anyways.
+
+More important, it helps ensure that your code returns consistent data. With tracking, "user:4" might be purged, and a subsequent `Fetch` would reload the data. This can result in different versions of "user:4" being returned by different parts of your system.
+
+## LayeredCache
+
+CCache's `LayeredCache` stores and retrieves values by both a primary and secondary key. Deletion can happen against either the primary and secondary key, or the primary key only (removing all values that share the same primary key).
+
+`LayeredCache` is useful for HTTP caching, when you want to purge all variations of a request.
+
+`LayeredCache` takes the same configuration object as the main cache, exposes the same optional tracking capabilities, but exposes a slightly different API:
+
+```go
+cache := ccache.Layered(ccache.Configure())
+
+cache.Set("/users/goku", "type:json", "{value_to_cache}", time.Minute * 5)
+cache.Set("/users/goku", "type:xml", "", time.Minute * 5)
+
+json := cache.Get("/users/goku", "type:json")
+xml := cache.Get("/users/goku", "type:xml")
+
+cache.Delete("/users/goku", "type:json")
+cache.Delete("/users/goku", "type:xml")
+// OR
+cache.DeleteAll("/users/goku")
+```
+
+# SecondaryCache
+
+In some cases, when using a `LayeredCache`, it may be desirable to always be acting on the secondary portion of the cache entry. This could be the case where the primary key is used as a key elsewhere in your code. 
The `SecondaryCache` is retrieved with:
+
+```go
+cache := ccache.Layered(ccache.Configure())
+sCache := cache.GetOrCreateSecondaryCache("/users/goku")
+sCache.Set("type:json", "{value_to_cache}", time.Minute * 5)
+```
+
+The semantics for interacting with the `SecondaryCache` are exactly the same as for a regular `Cache`. However, one difference is that `Get` will not return nil, but will return an empty 'cache' for a non-existent primary key.
+
+## Size
+By default, items added to a cache have a size of 1. This means that if you configure `MaxSize(10000)`, you'll be able to store 10000 items in the cache.
+
+However, if the values you set into the cache have a method `Size() int64`, this size will be used. Note that ccache has an overhead of ~350 bytes per entry, which isn't taken into account. In other words, given a filled up cache, with `MaxSize(4096000)` and items that return a `Size() int64` of 2048, we can expect to find 2000 items (4096000/2048) taking a total space of 4796000 bytes.
+
+## Want Something Simpler?
+For a simpler cache, check out [rcache](https://github.com/karlseguin/rcache)
diff --git a/vendor/github.com/karlseguin/ccache/v2/secondarycache.go b/vendor/github.com/karlseguin/ccache/v2/secondarycache.go
new file mode 100644
index 0000000..cd322c2
--- /dev/null
+++ b/vendor/github.com/karlseguin/ccache/v2/secondarycache.go
@@ -0,0 +1,72 @@
+package ccache
+
+import "time"
+
+type SecondaryCache struct {
+	bucket *bucket
+	pCache *LayeredCache
+}
+
+// Get the secondary key.
+// The semantics are the same as for LayeredCache.Get
+func (s *SecondaryCache) Get(secondary string) *Item {
+	return s.bucket.get(secondary)
+}
+
+// Set the secondary key to a value.
+// The semantics are the same as for LayeredCache.Set +func (s *SecondaryCache) Set(secondary string, value interface{}, duration time.Duration) *Item { + item, existing := s.bucket.set(secondary, value, duration, false) + if existing != nil { + s.pCache.deletables <- existing + } + s.pCache.promote(item) + return item +} + +// Fetch or set a secondary key. +// The semantics are the same as for LayeredCache.Fetch +func (s *SecondaryCache) Fetch(secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) { + item := s.Get(secondary) + if item != nil { + return item, nil + } + value, err := fetch() + if err != nil { + return nil, err + } + return s.Set(secondary, value, duration), nil +} + +// Delete a secondary key. +// The semantics are the same as for LayeredCache.Delete +func (s *SecondaryCache) Delete(secondary string) bool { + item := s.bucket.delete(secondary) + if item != nil { + s.pCache.deletables <- item + return true + } + return false +} + +// Replace a secondary key. +// The semantics are the same as for LayeredCache.Replace +func (s *SecondaryCache) Replace(secondary string, value interface{}) bool { + item := s.Get(secondary) + if item == nil { + return false + } + s.Set(secondary, value, item.TTL()) + return true +} + +// Track a secondary key. 
+// The semantics are the same as for LayeredCache.TrackingGet +func (c *SecondaryCache) TrackingGet(secondary string) TrackedItem { + item := c.Get(secondary) + if item == nil { + return NilTracked + } + item.track() + return item +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 66415de..1397f8c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -26,6 +26,9 @@ github.com/google/cel-go/common/types/traits github.com/google/cel-go/interpreter github.com/google/cel-go/parser github.com/google/cel-go/parser/gen +# github.com/karlseguin/ccache/v2 v2.0.8 +## explicit; go 1.13 +github.com/karlseguin/ccache/v2 # github.com/ohler55/ojg v1.21.0 ## explicit; go 1.21 github.com/ohler55/ojg