commit da751c02fb5c063ecd2ebda6d74a9c43fe04a379
parent eb08529f35ce33ed98c34fb48013f0f4a5fc9635
Author: kim <89579420+NyaaaWhatsUpDoc@users.noreply.github.com>
Date: Fri, 16 Dec 2022 22:36:52 +0000
update go-cache to v3.2.0 with support for ignoring errors (#1273)
Diffstat:
6 files changed, 105 insertions(+), 71 deletions(-)
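The user-visible changes in this go-cache bump are a consolidated constructor (NewSized is removed; New now takes the capacity directly) and a new IgnoreErrors hook so context cancellation is no longer cached as a result. A minimal sketch of constructing a cache against the new signature, using a placeholder value type and an illustrative capacity (neither taken from this diff):

package main

import "codeberg.org/gruf/go-cache/v3/result"

// Account is a placeholder value type for illustration only.
type Account struct {
	ID  string
	URI string
}

func main() {
	// v3.2.0: lookups, value copy function and capacity are all
	// passed to result.New; the separate NewSized constructor is gone.
	cache := result.New([]result.Lookup{
		{Name: "ID"},
		{Name: "URI"},
	}, func(a1 *Account) *Account {
		a2 := new(Account)
		*a2 = *a1
		return a2
	}, 1000) // illustrative capacity

	_ = cache
}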
diff --git a/go.mod b/go.mod
@@ -5,7 +5,7 @@ go 1.19
require (
codeberg.org/gruf/go-bytesize v1.0.0
codeberg.org/gruf/go-byteutil v1.0.2
- codeberg.org/gruf/go-cache/v3 v3.1.8
+ codeberg.org/gruf/go-cache/v3 v3.2.0
codeberg.org/gruf/go-debug v1.2.0
codeberg.org/gruf/go-errors/v2 v2.0.2
codeberg.org/gruf/go-kv v1.5.2
diff --git a/go.sum b/go.sum
@@ -69,8 +69,8 @@ codeberg.org/gruf/go-bytesize v1.0.0/go.mod h1:n/GU8HzL9f3UNp/mUKyr1qVmTlj7+xacp
codeberg.org/gruf/go-byteutil v1.0.0/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
codeberg.org/gruf/go-byteutil v1.0.2 h1:OesVyK5VKWeWdeDR00zRJ+Oy8hjXx1pBhn7WVvcZWVE=
codeberg.org/gruf/go-byteutil v1.0.2/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
-codeberg.org/gruf/go-cache/v3 v3.1.8 h1:wbUef/QtRstEb7sSpQYHT5CtSFtKkeZr4ZhOTXqOpac=
-codeberg.org/gruf/go-cache/v3 v3.1.8/go.mod h1:h6im2UVGdrGtNt4IVKARVeoW4kAdok5ts7CbH15UWXs=
+codeberg.org/gruf/go-cache/v3 v3.2.0 h1:pHJhS3SqufVnA2bxgzQpBh9Mfsljqulx2ynpy6thTE8=
+codeberg.org/gruf/go-cache/v3 v3.2.0/go.mod h1:d4xafgOjVE+4+82WjIqqJl8NQusXkgUHbkTuXoeB3fA=
codeberg.org/gruf/go-debug v1.2.0 h1:WBbTMnK1ArFKUmgv04aO2JiC/daTOB8zQGi521qb7OU=
codeberg.org/gruf/go-debug v1.2.0/go.mod h1:N+vSy9uJBQgpQcJUqjctvqFz7tBHJf+S/PIjLILzpLg=
codeberg.org/gruf/go-errors/v2 v2.0.0/go.mod h1:ZRhbdhvgoUA3Yw6e56kd9Ox984RrvbEFC2pOXyHDJP4=
diff --git a/internal/cache/gts.go b/internal/cache/gts.go
@@ -185,7 +185,7 @@ func (c *gtsCaches) User() *result.Cache[*gtsmodel.User] {
}
func (c *gtsCaches) initAccount() {
- c.account = result.NewSized([]result.Lookup{
+ c.account = result.New([]result.Lookup{
{Name: "ID"},
{Name: "URI"},
{Name: "URL"},
@@ -200,7 +200,7 @@ func (c *gtsCaches) initAccount() {
}
func (c *gtsCaches) initBlock() {
- c.block = result.NewSized([]result.Lookup{
+ c.block = result.New([]result.Lookup{
{Name: "ID"},
{Name: "AccountID.TargetAccountID"},
{Name: "URI"},
@@ -220,7 +220,7 @@ func (c *gtsCaches) initDomainBlock() {
}
func (c *gtsCaches) initEmoji() {
- c.emoji = result.NewSized([]result.Lookup{
+ c.emoji = result.New([]result.Lookup{
{Name: "ID"},
{Name: "URI"},
{Name: "Shortcode.Domain"},
@@ -234,7 +234,7 @@ func (c *gtsCaches) initEmoji() {
}
func (c *gtsCaches) initEmojiCategory() {
- c.emojiCategory = result.NewSized([]result.Lookup{
+ c.emojiCategory = result.New([]result.Lookup{
{Name: "ID"},
{Name: "Name"},
}, func(c1 *gtsmodel.EmojiCategory) *gtsmodel.EmojiCategory {
@@ -246,7 +246,7 @@ func (c *gtsCaches) initEmojiCategory() {
}
func (c *gtsCaches) initMention() {
- c.mention = result.NewSized([]result.Lookup{
+ c.mention = result.New([]result.Lookup{
{Name: "ID"},
}, func(m1 *gtsmodel.Mention) *gtsmodel.Mention {
m2 := new(gtsmodel.Mention)
@@ -257,7 +257,7 @@ func (c *gtsCaches) initMention() {
}
func (c *gtsCaches) initNotification() {
- c.notification = result.NewSized([]result.Lookup{
+ c.notification = result.New([]result.Lookup{
{Name: "ID"},
}, func(n1 *gtsmodel.Notification) *gtsmodel.Notification {
n2 := new(gtsmodel.Notification)
@@ -268,7 +268,7 @@ func (c *gtsCaches) initNotification() {
}
func (c *gtsCaches) initStatus() {
- c.status = result.NewSized([]result.Lookup{
+ c.status = result.New([]result.Lookup{
{Name: "ID"},
{Name: "URI"},
{Name: "URL"},
@@ -282,7 +282,7 @@ func (c *gtsCaches) initStatus() {
// initTombstone will initialize the gtsmodel.Tombstone cache.
func (c *gtsCaches) initTombstone() {
- c.tombstone = result.NewSized([]result.Lookup{
+ c.tombstone = result.New([]result.Lookup{
{Name: "ID"},
{Name: "URI"},
}, func(t1 *gtsmodel.Tombstone) *gtsmodel.Tombstone {
@@ -294,7 +294,7 @@ func (c *gtsCaches) initTombstone() {
}
func (c *gtsCaches) initUser() {
- c.user = result.NewSized([]result.Lookup{
+ c.user = result.New([]result.Lookup{
{Name: "ID"},
{Name: "AccountID"},
{Name: "Email"},
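The call sites above keep their shape apart from the constructor rename; the practical effect of the version bump shows up at Load time. A hedged sketch of the Load pattern these caches are used with (the wrapper type, fetch function and field set are assumptions, not part of this diff):

package db

import (
	"context"

	"codeberg.org/gruf/go-cache/v3/result"
)

// Account stands in for gtsmodel.Account; only the fields needed
// for the sketch are shown.
type Account struct {
	ID  string
	URI string
}

// accountDB is a hypothetical wrapper pairing a result cache with
// a database fetch function.
type accountDB struct {
	cache *result.Cache[*Account]
	fetch func(ctx context.Context, id string) (*Account, error)
}

// GetAccountByID loads through the cache. With go-cache v3.2.0 a
// context.Canceled or context.DeadlineExceeded returned by fetch is
// passed back to the caller without being stored as a cached error.
func (d *accountDB) GetAccountByID(ctx context.Context, id string) (*Account, error) {
	return d.cache.Load("ID", func() (*Account, error) {
		return d.fetch(ctx, id)
	}, id)
}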
diff --git a/vendor/codeberg.org/gruf/go-cache/v3/result/cache.go b/vendor/codeberg.org/gruf/go-cache/v3/result/cache.go
@@ -1,10 +1,12 @@
package result
import (
+ "context"
"reflect"
"time"
"codeberg.org/gruf/go-cache/v3/ttl"
+ "codeberg.org/gruf/go-errors/v2"
)
// Lookup represents a struct object lookup method in the cache.
@@ -16,6 +18,9 @@ type Lookup struct {
// AllowZero indicates whether to accept and cache
// under zero value keys, otherwise ignore them.
AllowZero bool
+
+ // TODO: support toggling case sensitive lookups.
+ // CaseSensitive bool
}
// Cache provides a means of caching value structures, along with
@@ -24,17 +29,13 @@ type Lookup struct {
type Cache[Value any] struct {
cache ttl.Cache[int64, result[Value]] // underlying result cache
lookups structKeys // pre-determined struct lookups
+ ignore func(error) bool // determines cacheable errors
copy func(Value) Value // copies a Value type
next int64 // update key counter
}
-// New returns a new initialized Cache, with given lookups and underlying value copy function.
-func New[Value any](lookups []Lookup, copy func(Value) Value) *Cache[Value] {
- return NewSized(lookups, copy, 64)
-}
-
-// NewSized returns a new initialized Cache, with given lookups, underlying value copy function and provided capacity.
-func NewSized[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Value] {
+// New returns a new initialized Cache, with given lookups, underlying value copy function and provided capacity.
+func New[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Value] {
var z Value
// Determine generic type
@@ -63,6 +64,7 @@ func NewSized[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cac
c.cache.Init(0, cap, 0)
c.SetEvictionCallback(nil)
c.SetInvalidateCallback(nil)
+ c.IgnoreErrors(nil)
return c
}
@@ -93,8 +95,8 @@ func (c *Cache[Value]) SetEvictionCallback(hook func(Value)) {
c.cache.SetEvictionCallback(func(item *ttl.Entry[int64, result[Value]]) {
for _, key := range item.Value.Keys {
// Delete key->pkey lookup
- pkeys := key.key.pkeys
- delete(pkeys, key.value)
+ pkeys := key.info.pkeys
+ delete(pkeys, key.key)
}
if item.Value.Error != nil {
@@ -116,8 +118,8 @@ func (c *Cache[Value]) SetInvalidateCallback(hook func(Value)) {
c.cache.SetInvalidateCallback(func(item *ttl.Entry[int64, result[Value]]) {
for _, key := range item.Value.Keys {
// Delete key->pkey lookup
- pkeys := key.key.pkeys
- delete(pkeys, key.value)
+ pkeys := key.info.pkeys
+ delete(pkeys, key.key)
}
if item.Value.Error != nil {
@@ -130,7 +132,23 @@ func (c *Cache[Value]) SetInvalidateCallback(hook func(Value)) {
})
}
-// Load will attempt to load an existing result from the cacche for the given lookup and key parts, else calling the load function and caching that result.
+// IgnoreErrors allows setting a function hook to determine which error types should not be cached.
+func (c *Cache[Value]) IgnoreErrors(ignore func(error) bool) {
+ if ignore == nil {
+ ignore = func(err error) bool {
+ return errors.Is(
+ err,
+ context.Canceled,
+ context.DeadlineExceeded,
+ )
+ }
+ }
+ c.cache.Lock()
+ c.ignore = ignore
+ c.cache.Unlock()
+}
+
+// Load will attempt to load an existing result from the cache for the given lookup and key parts, else call the provided load function and cache the result.
func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts ...any) (Value, error) {
var (
zero Value
@@ -146,7 +164,7 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
// Acquire cache lock
c.cache.Lock()
- // Look for primary key for cache key
+ // Look for primary cache key
pkey, ok := keyInfo.pkeys[ckey]
if ok {
@@ -159,17 +177,28 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
c.cache.Unlock()
if !ok {
- // Generate new result from fresh load.
- res.Value, res.Error = load()
+ // Generate fresh result.
+ value, err := load()
+
+ if err != nil {
+ if c.ignore(err) {
+ // don't cache this error type
+ return zero, err
+ }
+
+ // Store error result.
+ res.Error = err
- if res.Error != nil {
// This load returned an error, only
// store this item under provided key.
- res.Keys = []cachedKey{{
- key: keyInfo,
- value: ckey,
+ res.Keys = []cacheKey{{
+ info: keyInfo,
+ key: ckey,
}}
} else {
+ // Store value result.
+ res.Value = value
+
// This was a successful load, generate keys.
res.Keys = c.lookups.generate(res.Value)
}
@@ -178,8 +207,8 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
c.cache.Lock()
defer c.cache.Unlock()
- // Cache this result
- c.storeResult(res)
+ // Cache result
+ c.store(res)
}
// Catch and return error
@@ -209,8 +238,8 @@ func (c *Cache[Value]) Store(value Value, store func() error) error {
c.cache.Lock()
defer c.cache.Unlock()
- // Cache this result
- c.storeResult(result)
+ // Cache result
+ c.store(result)
return nil
}
@@ -270,22 +299,13 @@ func (c *Cache[Value]) Clear() {
c.cache.Clear()
}
-// Len returns the current length of the cache.
-func (c *Cache[Value]) Len() int {
- return c.cache.Cache.Len()
-}
-
-// Cap returns the maximum capacity of this result cache.
-func (c *Cache[Value]) Cap() int {
- return c.cache.Cache.Cap()
-}
-
-func (c *Cache[Value]) storeResult(res result[Value]) {
+// store will cache this result under all of its required cache keys.
+func (c *Cache[Value]) store(res result[Value]) {
for _, key := range res.Keys {
- pkeys := key.key.pkeys
+ pkeys := key.info.pkeys
// Look for cache primary key
- pkey, ok := pkeys[key.value]
+ pkey, ok := pkeys[key.key]
if ok {
// Get the overlapping result with this key.
@@ -293,11 +313,11 @@ func (c *Cache[Value]) storeResult(res result[Value]) {
// From conflicting entry, drop this key, this
// will prevent eviction cleanup key confusion.
- entry.Value.Keys.drop(key.key.name)
+ entry.Value.Keys.drop(key.info.name)
if len(entry.Value.Keys) == 0 {
// We just over-wrote the only lookup key for
- // this value, so we drop its primary key too
+ // this value, so we drop its primary key too.
c.cache.Cache.Delete(pkey)
}
}
@@ -306,11 +326,14 @@ func (c *Cache[Value]) storeResult(res result[Value]) {
// Get primary key
pkey := c.next
c.next++
+ if pkey > c.next {
+ panic("cache primary key overflow")
+ }
// Store all primary key lookups
for _, key := range res.Keys {
- pkeys := key.key.pkeys
- pkeys[key.value] = pkey
+ pkeys := key.info.pkeys
+ pkeys[key.key] = pkey
}
// Store main entry under primary key, using evict hook if needed
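The IgnoreErrors hook added above defaults to skipping context.Canceled and context.DeadlineExceeded. A sketch of installing a wider hook on any result cache follows; the transient-error sentinel is hypothetical, and nothing in this commit configures such a hook:

package cachehooks

import (
	"context"
	"errors"

	"codeberg.org/gruf/go-cache/v3/result"
)

// errTransient is a hypothetical sentinel for failures that
// should never be cached.
var errTransient = errors.New("transient failure")

// installIgnoreHook replicates the default behaviour (skip context
// cancellation errors) and additionally skips errTransient.
func installIgnoreHook[V any](c *result.Cache[V]) {
	c.IgnoreErrors(func(err error) bool {
		return errors.Is(err, context.Canceled) ||
			errors.Is(err, context.DeadlineExceeded) ||
			errors.Is(err, errTransient)
	})
}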
diff --git a/vendor/codeberg.org/gruf/go-cache/v3/result/key.go b/vendor/codeberg.org/gruf/go-cache/v3/result/key.go
@@ -27,8 +27,8 @@ func (sk structKeys) get(name string) *structKey {
// generate will calculate and produce a slice of cache keys the given value
// can be stored under in the, as determined by receiving struct keys.
-func (sk structKeys) generate(a any) []cachedKey {
- var keys []cachedKey
+func (sk structKeys) generate(a any) []cacheKey {
+ var keys []cacheKey
// Get reflected value in order
// to access the struct fields
@@ -43,8 +43,8 @@ func (sk structKeys) generate(a any) []cachedKey {
}
// Acquire byte buffer
- buf := bufpool.Get().(*byteutil.Buffer)
- defer bufpool.Put(buf)
+ buf := getBuf()
+ defer putBuf(buf)
for i := range sk {
// Reset buffer
@@ -68,39 +68,39 @@ func (sk structKeys) generate(a any) []cachedKey {
}
// Append new cached key to slice
- keys = append(keys, cachedKey{
- key: &sk[i],
- value: string(buf.B), // copy
+ keys = append(keys, cacheKey{
+ info: &sk[i],
+ key: string(buf.B), // copy
})
}
return keys
}
-type cacheKeys []cachedKey
+type cacheKeys []cacheKey
// drop will drop the cachedKey with lookup name from receiving cacheKeys slice.
func (ck *cacheKeys) drop(name string) {
_ = *ck // move out of loop
for i := range *ck {
- if (*ck)[i].key.name == name {
+ if (*ck)[i].info.name == name {
(*ck) = append((*ck)[:i], (*ck)[i+1:]...)
break
}
}
}
-// cachedKey represents an actual cached key.
-type cachedKey struct {
- // key is a reference to the structKey this
+// cacheKey represents an actual cached key.
+type cacheKey struct {
+ // info is a reference to the structKey this
// cacheKey is representing. This is a shared
// reference and as such only the structKey.pkeys
// lookup map is expecting to be modified.
- key *structKey
+ info *structKey
// value is the actual string representing
// this cache key for hashmap lookups.
- value string
+ key string
}
// structKey represents a list of struct fields
@@ -196,9 +196,9 @@ func genKey(parts ...any) string {
panic("no key parts provided")
}
- // Acquire buffer and reset
- buf := bufpool.Get().(*byteutil.Buffer)
- defer bufpool.Put(buf)
+ // Acquire byte buffer
+ buf := getBuf()
+ defer putBuf(buf)
buf.Reset()
// Encode each key part
@@ -222,8 +222,19 @@ func isExported(fnName string) bool {
// bufpool provides a memory pool of byte
// buffers use when encoding key types.
-var bufpool = sync.Pool{
+var bufPool = sync.Pool{
New: func() any {
return &byteutil.Buffer{B: make([]byte, 0, 512)}
},
}
+
+func getBuf() *byteutil.Buffer {
+ return bufPool.Get().(*byteutil.Buffer)
+}
+
+func putBuf(buf *byteutil.Buffer) {
+ if buf.Cap() > int(^uint16(0)) {
+ return // drop large bufs
+ }
+ bufPool.Put(buf)
+}
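The pooled-buffer helpers above only return small buffers (capacity at most ^uint16(0) bytes, just under 64KiB) to the pool, so one oversized key does not pin a large allocation for the lifetime of the pool. The same pattern in isolation, sketched with the standard library's bytes.Buffer standing in for byteutil.Buffer:

package keybuf

import (
	"bytes"
	"sync"
)

// pool holds reusable buffers pre-sized for typical cache keys.
var pool = sync.Pool{
	New: func() any {
		return bytes.NewBuffer(make([]byte, 0, 512))
	},
}

// getBuf fetches a buffer from the pool.
func getBuf() *bytes.Buffer {
	return pool.Get().(*bytes.Buffer)
}

// putBuf resets and returns a buffer to the pool, dropping any
// buffer that has grown past the size cap.
func putBuf(buf *bytes.Buffer) {
	if buf.Cap() > int(^uint16(0)) {
		return // drop oversized buffers instead of pooling them
	}
	buf.Reset()
	pool.Put(buf)
}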
diff --git a/vendor/modules.txt b/vendor/modules.txt
@@ -13,7 +13,7 @@ codeberg.org/gruf/go-bytesize
# codeberg.org/gruf/go-byteutil v1.0.2
## explicit; go 1.16
codeberg.org/gruf/go-byteutil
-# codeberg.org/gruf/go-cache/v3 v3.1.8
+# codeberg.org/gruf/go-cache/v3 v3.2.0
## explicit; go 1.19
codeberg.org/gruf/go-cache/v3
codeberg.org/gruf/go-cache/v3/result