gtsocial-umbx

Unnamed repository; edit this file 'description' to name the repository.
Log | Files | Refs | README | LICENSE

cache.go (10590B)


      1 package result
      2 
      3 import (
      4 	"context"
      5 	"reflect"
      6 	"time"
      7 	_ "unsafe"
      8 
      9 	"codeberg.org/gruf/go-cache/v3/ttl"
     10 	"codeberg.org/gruf/go-errors/v2"
     11 )
     12 
// Lookup represents a struct object lookup method in the cache.
// Each Lookup is translated at cache construction time (see New)
// into a keyed accessor over the cached struct type's fields.
type Lookup struct {
	// Name is a period ('.') separated string
	// of struct fields this Key encompasses.
	Name string

	// AllowZero indicates whether to accept and cache
	// under zero value keys, otherwise ignore them.
	AllowZero bool

	// Multi allows specifying a key capable of storing
	// multiple results. Note this only supports invalidate.
	Multi bool

	// TODO: support toggling case sensitive lookups.
	// CaseSensitive bool
}
     30 
// Cache provides a means of caching value structures, along with
// the results of attempting to load them. An example usecase of this
// cache would be in wrapping a database, allowing caching of sql.ErrNoRows.
//
// Values are stored under an internal monotonic primary key (next),
// with per-lookup maps translating lookup keys -> primary keys.
type Cache[Value any] struct {
	cache   ttl.Cache[int64, result[Value]] // underlying result cache
	invalid func(Value)                     // store unwrapped invalidate callback.
	lookups structKeys                      // pre-determined struct lookups
	ignore  func(error) bool                // determines cacheable errors
	copy    func(Value) Value               // copies a Value type
	next    int64                           // update key counter
}
     42 
     43 // New returns a new initialized Cache, with given lookups, underlying value copy function and provided capacity.
     44 func New[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Value] {
     45 	var z Value
     46 
     47 	// Determine generic type
     48 	t := reflect.TypeOf(z)
     49 
     50 	// Iteratively deref pointer type
     51 	for t.Kind() == reflect.Pointer {
     52 		t = t.Elem()
     53 	}
     54 
     55 	// Ensure that this is a struct type
     56 	if t.Kind() != reflect.Struct {
     57 		panic("generic parameter type must be struct (or ptr to)")
     58 	}
     59 
     60 	// Allocate new cache object
     61 	c := &Cache[Value]{copy: copy}
     62 	c.lookups = make([]structKey, len(lookups))
     63 
     64 	for i, lookup := range lookups {
     65 		// Create keyed field info for lookup
     66 		c.lookups[i] = newStructKey(lookup, t)
     67 	}
     68 
     69 	// Create and initialize underlying cache
     70 	c.cache.Init(0, cap, 0)
     71 	c.SetEvictionCallback(nil)
     72 	c.SetInvalidateCallback(nil)
     73 	c.IgnoreErrors(nil)
     74 	return c
     75 }
     76 
// Start will start the cache background eviction routine with given sweep frequency. If already
// running or a freq <= 0 provided, this is a no-op. This will block until eviction routine started.
func (c *Cache[Value]) Start(freq time.Duration) bool {
	// Delegate directly to the underlying TTL cache.
	return c.cache.Start(freq)
}
     82 
// Stop will stop cache background eviction routine. If not running this
// is a no-op. This will block until the eviction routine has stopped.
func (c *Cache[Value]) Stop() bool {
	// Delegate directly to the underlying TTL cache.
	return c.cache.Stop()
}
     88 
// SetTTL sets the cache item TTL. Update can be specified to force updates of existing items
// in the cache, this will simply add the change in TTL to their current expiry time.
func (c *Cache[Value]) SetTTL(ttl time.Duration, update bool) {
	// Delegate directly to the underlying TTL cache.
	c.cache.SetTTL(ttl, update)
}
     94 
     95 // SetEvictionCallback sets the eviction callback to the provided hook.
     96 func (c *Cache[Value]) SetEvictionCallback(hook func(Value)) {
     97 	if hook == nil {
     98 		// Ensure non-nil hook.
     99 		hook = func(Value) {}
    100 	}
    101 	c.cache.SetEvictionCallback(func(pkey int64, res result[Value]) {
    102 		c.cache.Lock()
    103 		for _, key := range res.Keys {
    104 			// Delete key->pkey lookup
    105 			pkeys := key.info.pkeys
    106 			delete(pkeys, key.key)
    107 		}
    108 		c.cache.Unlock()
    109 
    110 		if res.Error != nil {
    111 			// Skip error hooks
    112 			return
    113 		}
    114 
    115 		// Call user hook.
    116 		hook(res.Value)
    117 	})
    118 }
    119 
    120 // SetInvalidateCallback sets the invalidate callback to the provided hook.
    121 func (c *Cache[Value]) SetInvalidateCallback(hook func(Value)) {
    122 	if hook == nil {
    123 		// Ensure non-nil hook.
    124 		hook = func(Value) {}
    125 	} // store hook.
    126 	c.invalid = hook
    127 	c.cache.SetInvalidateCallback(func(pkey int64, res result[Value]) {
    128 		c.cache.Lock()
    129 		for _, key := range res.Keys {
    130 			// Delete key->pkey lookup
    131 			pkeys := key.info.pkeys
    132 			delete(pkeys, key.key)
    133 		}
    134 		c.cache.Unlock()
    135 
    136 		if res.Error != nil {
    137 			// Skip error hooks
    138 			return
    139 		}
    140 
    141 		// Call user hook.
    142 		hook(res.Value)
    143 	})
    144 }
    145 
    146 // IgnoreErrors allows setting a function hook to determine which error types should / not be cached.
    147 func (c *Cache[Value]) IgnoreErrors(ignore func(error) bool) {
    148 	if ignore == nil {
    149 		ignore = func(err error) bool {
    150 			return errors.Comparable(
    151 				err,
    152 				context.Canceled,
    153 				context.DeadlineExceeded,
    154 			)
    155 		}
    156 	}
    157 	c.cache.Lock()
    158 	c.ignore = ignore
    159 	c.cache.Unlock()
    160 }
    161 
// Load will attempt to load an existing result from the cache for the given lookup and key parts, else calling the provided load function and caching the result.
// Both cached values and cached errors are returned; errors that the
// configured ignore hook rejects are returned but never cached.
func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts ...any) (Value, error) {
	var (
		zero Value
		res  result[Value]
		ok   bool
	)

	// Get lookup key info by name.
	keyInfo := c.lookups.get(lookup)
	if !keyInfo.unique {
		panic("non-unique lookup does not support load: " + lookup)
	}

	// Generate cache key string.
	ckey := keyInfo.genKey(keyParts)

	// Acquire cache lock
	c.cache.Lock()

	// Look for primary cache key
	pkeys := keyInfo.pkeys[ckey]

	if ok = (len(pkeys) > 0); ok {
		var entry *ttl.Entry[int64, result[Value]]

		// Fetch the result for primary key
		entry, ok = c.cache.Cache.Get(pkeys[0])
		if ok {
			// Since the invalidation / eviction hooks acquire a mutex
			// lock separately, and only at this point are the pkeys
			// updated, there is a chance that a primary key may return
			// no matching entry. Hence we have to check for it here.
			res = entry.Value
		}
	}

	// Done with lock
	c.cache.Unlock()

	if !ok {
		// Cache miss: generate fresh result
		// via the user-provided load function.
		value, err := load()

		if err != nil {
			if c.ignore(err) {
				// don't cache this error type
				return zero, err
			}

			// Store error result.
			res.Error = err

			// This load returned an error, only
			// store this item under provided key.
			res.Keys = []cacheKey{{
				info: keyInfo,
				key:  ckey,
			}}
		} else {
			// Store value result.
			res.Value = value

			// This was a successful load, generate keys.
			res.Keys = c.lookups.generate(res.Value)
		}

		var evict func()

		// Acquire cache lock. The deferred func also runs any
		// eviction hook captured by c.store() below, but only
		// AFTER the lock has been released (hooks re-lock).
		c.cache.Lock()
		defer func() {
			c.cache.Unlock()
			if evict != nil {
				evict()
			}
		}()

		// Store result in cache.
		evict = c.store(res)
	}

	// Catch and return error
	if res.Error != nil {
		return zero, res.Error
	}

	// Return a copy of value from cache,
	// so callers cannot mutate the cached one.
	return c.copy(res.Value), nil
}
    252 
    253 // Store will call the given store function, and on success store the value in the cache as a positive result.
    254 func (c *Cache[Value]) Store(value Value, store func() error) error {
    255 	// Attempt to store this value.
    256 	if err := store(); err != nil {
    257 		return err
    258 	}
    259 
    260 	// Prepare cached result.
    261 	result := result[Value]{
    262 		Keys:  c.lookups.generate(value),
    263 		Value: c.copy(value),
    264 		Error: nil,
    265 	}
    266 
    267 	var evict func()
    268 
    269 	// Acquire cache lock.
    270 	c.cache.Lock()
    271 	defer func() {
    272 		c.cache.Unlock()
    273 		if evict != nil {
    274 			evict()
    275 		}
    276 	}()
    277 
    278 	// Store result in cache.
    279 	evict = c.store(result)
    280 
    281 	// Call invalidate.
    282 	c.invalid(value)
    283 
    284 	return nil
    285 }
    286 
    287 // Has checks the cache for a positive result under the given lookup and key parts.
    288 func (c *Cache[Value]) Has(lookup string, keyParts ...any) bool {
    289 	var res result[Value]
    290 	var ok bool
    291 
    292 	// Get lookup key info by name.
    293 	keyInfo := c.lookups.get(lookup)
    294 	if !keyInfo.unique {
    295 		panic("non-unique lookup does not support has: " + lookup)
    296 	}
    297 
    298 	// Generate cache key string.
    299 	ckey := keyInfo.genKey(keyParts)
    300 
    301 	// Acquire cache lock
    302 	c.cache.Lock()
    303 
    304 	// Look for primary key for cache key
    305 	pkeys := keyInfo.pkeys[ckey]
    306 
    307 	if ok = (len(pkeys) > 0); ok {
    308 		var entry *ttl.Entry[int64, result[Value]]
    309 
    310 		// Fetch the result for primary key
    311 		entry, ok = c.cache.Cache.Get(pkeys[0])
    312 		if ok {
    313 			// Since the invalidation / eviction hooks acquire a mutex
    314 			// lock separately, and only at this point are the pkeys
    315 			// updated, there is a chance that a primary key may return
    316 			// no matching entry. Hence we have to check for it here.
    317 			res = entry.Value
    318 		}
    319 	}
    320 
    321 	// Done with lock
    322 	c.cache.Unlock()
    323 
    324 	// Check for non-error result.
    325 	return ok && (res.Error == nil)
    326 }
    327 
    328 // Invalidate will invalidate any result from the cache found under given lookup and key parts.
    329 func (c *Cache[Value]) Invalidate(lookup string, keyParts ...any) {
    330 	// Get lookup key info by name.
    331 	keyInfo := c.lookups.get(lookup)
    332 
    333 	// Generate cache key string.
    334 	ckey := keyInfo.genKey(keyParts)
    335 
    336 	// Look for primary key for cache key
    337 	c.cache.Lock()
    338 	pkeys := keyInfo.pkeys[ckey]
    339 	delete(keyInfo.pkeys, ckey)
    340 	c.cache.Unlock()
    341 
    342 	// Invalidate all primary keys.
    343 	c.cache.InvalidateAll(pkeys...)
    344 }
    345 
// Clear empties the cache, calling the invalidate callback.
func (c *Cache[Value]) Clear() { c.cache.Clear() }
    348 
// store will cache this result under all of its required cache keys.
//
// NOTE(review): every call site holds c.cache's lock around this call,
// and runs the returned evict func (if non-nil) only after unlocking —
// new callers must follow the same contract, since the eviction path
// re-acquires that lock.
func (c *Cache[Value]) store(res result[Value]) (evict func()) {
	// Get primary key, panicking if the
	// monotonic counter ever wraps around.
	pnext := c.next
	c.next++
	if pnext > c.next {
		panic("cache primary key overflow")
	}

	for _, key := range res.Keys {
		// Look for cache primary keys.
		pkeys := key.info.pkeys[key.key]

		if key.info.unique && len(pkeys) > 0 {
			for _, conflict := range pkeys {
				// Get the overlapping result with this key.
				entry, _ := c.cache.Cache.Get(conflict)

				// From conflicting entry, drop this key, this
				// will prevent eviction cleanup key confusion.
				entry.Value.Keys.drop(key.info.name)

				if len(entry.Value.Keys) == 0 {
					// We just over-wrote the only lookup key for
					// this value, so we drop its primary key too.
					c.cache.Cache.Delete(conflict)
				}
			}

			// Drop existing.
			pkeys = pkeys[:0]
		}

		// Store primary key lookup.
		pkeys = append(pkeys, pnext)
		key.info.pkeys[key.key] = pkeys
	}

	// Store main entry under primary key, using evict hook if needed.
	// The hook (presumably fired when an entry must be displaced to
	// make room — confirm against ttl.Cache.SetWithHook) captures the
	// eviction for the caller to run after releasing the lock.
	c.cache.Cache.SetWithHook(pnext, &ttl.Entry[int64, result[Value]]{
		Expiry: c.expiry(),
		Key:    pnext,
		Value:  res,
	}, func(_ int64, item *ttl.Entry[int64, result[Value]]) {
		evict = func() { c.cache.Evict(item.Key, item.Value) }
	})

	return evict
}
    398 
// runtime_nanotime links directly to the runtime's internal monotonic
// clock, avoiding the allocation/overhead of a full time.Now() call.
//
//go:linkname runtime_nanotime runtime.nanotime
func runtime_nanotime() uint64
    401 
    402 // expiry returns an the next expiry time to use for an entry,
    403 // which is equivalent to time.Now().Add(ttl), or zero if disabled.
    404 func (c *Cache[Value]) expiry() uint64 {
    405 	if ttl := c.cache.TTL; ttl > 0 {
    406 		return runtime_nanotime() +
    407 			uint64(c.cache.TTL)
    408 	}
    409 	return 0
    410 }
    411 
// result wraps a cached load outcome: either a usable value,
// or the (cacheable) error produced while loading it.
type result[Value any] struct {
	// keys accessible under
	Keys cacheKeys

	// cached value
	Value Value

	// cached error
	Error error
}