mirror of
https://github.com/bitechdev/ResolveSpec.git
synced 2025-12-31 00:34:25 +00:00
Cache package is pure infrastructure. Cache invalidates on create/delete from the API
Some checks failed
Tests / Integration Tests (push) Failing after 9s
Build , Vet Test, and Lint / Lint Code (push) Successful in 8m13s
Build , Vet Test, and Lint / Build (push) Successful in -24m36s
Build , Vet Test, and Lint / Run Vet Tests (1.23.x) (push) Successful in -25m6s
Build , Vet Test, and Lint / Run Vet Tests (1.24.x) (push) Successful in -24m33s
Tests / Unit Tests (push) Failing after -25m39s
pkg/cache/cache_manager.go (vendored): 20 lines changed
@@ -57,11 +57,31 @@ func (c *Cache) SetBytes(ctx context.Context, key string, value []byte, ttl time
    return c.provider.Set(ctx, key, value, ttl)
}

// SetWithTags serializes and stores a value in the cache with the specified TTL and tags.
func (c *Cache) SetWithTags(ctx context.Context, key string, value interface{}, ttl time.Duration, tags []string) error {
    data, err := json.Marshal(value)
    if err != nil {
        return fmt.Errorf("failed to serialize: %w", err)
    }

    return c.provider.SetWithTags(ctx, key, data, ttl, tags)
}

// SetBytesWithTags stores raw bytes in the cache with the specified TTL and tags.
func (c *Cache) SetBytesWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error {
    return c.provider.SetWithTags(ctx, key, value, ttl, tags)
}

// Delete removes a key from the cache.
func (c *Cache) Delete(ctx context.Context, key string) error {
    return c.provider.Delete(ctx, key)
}

// DeleteByTag removes all keys associated with the given tag.
func (c *Cache) DeleteByTag(ctx context.Context, tag string) error {
    return c.provider.DeleteByTag(ctx, tag)
}

// DeleteByPattern removes all keys matching the pattern.
func (c *Cache) DeleteByPattern(ctx context.Context, pattern string) error {
    return c.provider.DeleteByPattern(ctx, pattern)
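The commit title says the cache package stays pure infrastructure and that invalidation is driven from the API on create/delete. A minimal sketch of that call pattern, built only on the Cache methods shown above (GetDefaultCache, SetWithTags, DeleteByTag, assuming GetDefaultCache is exported as it is used elsewhere in the package); the handler hook, the "table:" tag convention and the caller's package are illustrative assumptions, not part of this commit:

package api

import (
    "context"
    "log"
    "time"

    "github.com/bitechdev/ResolveSpec/pkg/cache"
)

// afterRecordWrite is a hypothetical API-layer hook: after a create or delete
// on tableName, drop every cache entry tagged with that table.
func afterRecordWrite(ctx context.Context, tableName string) {
    if err := cache.GetDefaultCache().DeleteByTag(ctx, "table:"+tableName); err != nil {
        log.Printf("cache invalidation for %s failed: %v", tableName, err)
    }
}

// cacheQueryTotal stores a computed total and tags it with its table so the
// hook above can invalidate it later. The "table:" tag scheme is illustrative.
func cacheQueryTotal(ctx context.Context, key, tableName string, total int) error {
    return cache.GetDefaultCache().SetWithTags(ctx, key,
        map[string]int{"total": total}, 10*time.Minute, []string{"table:" + tableName})
}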
pkg/cache/provider.go (vendored): 8 lines changed
@@ -15,9 +15,17 @@ type Provider interface {
    // If ttl is 0, the item never expires.
    Set(ctx context.Context, key string, value []byte, ttl time.Duration) error

    // SetWithTags stores a value in the cache with the specified TTL and tags.
    // Tags can be used to invalidate groups of related keys.
    // If ttl is 0, the item never expires.
    SetWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error

    // Delete removes a key from the cache.
    Delete(ctx context.Context, key string) error

    // DeleteByTag removes all keys associated with the given tag.
    DeleteByTag(ctx context.Context, tag string) error

    // DeleteByPattern removes all keys matching the pattern.
    // Pattern syntax depends on the provider implementation.
    DeleteByPattern(ctx context.Context, pattern string) error
pkg/cache/provider_memcache.go (vendored): 140 lines changed
@@ -2,6 +2,7 @@ package cache

import (
    "context"
    "encoding/json"
    "fmt"
    "time"

@@ -97,8 +98,115 @@ func (m *MemcacheProvider) Set(ctx context.Context, key string, value []byte, tt
    return m.client.Set(item)
}

// SetWithTags stores a value in the cache with the specified TTL and tags.
// Note: Tag support in Memcache is limited and less efficient than Redis.
func (m *MemcacheProvider) SetWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error {
    if ttl == 0 {
        ttl = m.options.DefaultTTL
    }

    expiration := int32(ttl.Seconds())

    // Set the main value
    item := &memcache.Item{
        Key:        key,
        Value:      value,
        Expiration: expiration,
    }
    if err := m.client.Set(item); err != nil {
        return err
    }

    // Store tags for this key
    if len(tags) > 0 {
        tagsData, err := json.Marshal(tags)
        if err != nil {
            return fmt.Errorf("failed to marshal tags: %w", err)
        }

        tagsItem := &memcache.Item{
            Key:        fmt.Sprintf("cache:tags:%s", key),
            Value:      tagsData,
            Expiration: expiration,
        }
        if err := m.client.Set(tagsItem); err != nil {
            return err
        }

        // Add key to each tag's key list
        for _, tag := range tags {
            tagKey := fmt.Sprintf("cache:tag:%s", tag)

            // Get existing keys for this tag
            var keys []string
            if item, err := m.client.Get(tagKey); err == nil {
                _ = json.Unmarshal(item.Value, &keys)
            }

            // Add current key if not already present
            found := false
            for _, k := range keys {
                if k == key {
                    found = true
                    break
                }
            }
            if !found {
                keys = append(keys, key)
            }

            // Store updated key list
            keysData, err := json.Marshal(keys)
            if err != nil {
                continue
            }

            tagItem := &memcache.Item{
                Key:        tagKey,
                Value:      keysData,
                Expiration: expiration + 3600, // Give tag lists longer TTL
            }
            _ = m.client.Set(tagItem)
        }
    }

    return nil
}

// Delete removes a key from the cache.
func (m *MemcacheProvider) Delete(ctx context.Context, key string) error {
    // Get tags for this key
    tagsKey := fmt.Sprintf("cache:tags:%s", key)
    if item, err := m.client.Get(tagsKey); err == nil {
        var tags []string
        if err := json.Unmarshal(item.Value, &tags); err == nil {
            // Remove key from each tag's key list
            for _, tag := range tags {
                tagKey := fmt.Sprintf("cache:tag:%s", tag)
                if tagItem, err := m.client.Get(tagKey); err == nil {
                    var keys []string
                    if err := json.Unmarshal(tagItem.Value, &keys); err == nil {
                        // Remove current key from the list
                        newKeys := make([]string, 0, len(keys))
                        for _, k := range keys {
                            if k != key {
                                newKeys = append(newKeys, k)
                            }
                        }
                        // Update the tag's key list
                        if keysData, err := json.Marshal(newKeys); err == nil {
                            tagItem.Value = keysData
                            _ = m.client.Set(tagItem)
                        }
                    }
                }
            }
        }
        // Delete the tags key
        _ = m.client.Delete(tagsKey)
    }

    // Delete the actual key
    err := m.client.Delete(key)
    if err == memcache.ErrCacheMiss {
        return nil

@@ -106,6 +214,38 @@ func (m *MemcacheProvider) Delete(ctx context.Context, key string) error {
    return err
}

// DeleteByTag removes all keys associated with the given tag.
func (m *MemcacheProvider) DeleteByTag(ctx context.Context, tag string) error {
    tagKey := fmt.Sprintf("cache:tag:%s", tag)

    // Get all keys associated with this tag
    item, err := m.client.Get(tagKey)
    if err == memcache.ErrCacheMiss {
        return nil
    }
    if err != nil {
        return err
    }

    var keys []string
    if err := json.Unmarshal(item.Value, &keys); err != nil {
        return fmt.Errorf("failed to unmarshal tag keys: %w", err)
    }

    // Delete all keys
    for _, key := range keys {
        _ = m.client.Delete(key)
        // Also delete the tags key for this cache key
        tagsKey := fmt.Sprintf("cache:tags:%s", key)
        _ = m.client.Delete(tagsKey)
    }

    // Delete the tag key itself
    _ = m.client.Delete(tagKey)

    return nil
}

// DeleteByPattern removes all keys matching the pattern.
// Note: Memcache does not support pattern-based deletion natively.
// This is a no-op for memcache and returns an error.
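Reading the memcache implementation above, the tag bookkeeping lives in ordinary memcache items that hold JSON lists. A sketch of the resulting key layout, written as a comment (illustrative key and tag names, not output captured from a server):

// After p.SetWithTags(ctx, "user:42", data, time.Minute, []string{"users", "reports"}),
// the provider stores three kinds of items:
//
//   "user:42"            -> data                  (the cached value)
//   "cache:tags:user:42" -> ["users","reports"]   (JSON: tags attached to this key)
//   "cache:tag:users"    -> [..., "user:42"]      (JSON: keys carrying this tag)
//   "cache:tag:reports"  -> [..., "user:42"]
//
// DeleteByTag(ctx, "users") loads "cache:tag:users", deletes each listed key and
// its "cache:tags:<key>" item, then deletes the tag list itself. The read-modify-write
// of these JSON lists is not atomic, which is part of why the comment above calls
// memcache tag support limited compared to Redis.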
pkg/cache/provider_memory.go (vendored): 118 lines changed
@@ -15,6 +15,7 @@ type memoryItem struct {
    Expiration time.Time
    LastAccess time.Time
    HitCount   int64
    Tags       []string
}

// isExpired checks if the item has expired.

@@ -27,11 +28,12 @@ func (m *memoryItem) isExpired() bool {

// MemoryProvider is an in-memory implementation of the Provider interface.
type MemoryProvider struct {
    mu        sync.RWMutex
    items     map[string]*memoryItem
    tagToKeys map[string]map[string]struct{} // tag -> set of keys
    options   *Options
    hits      atomic.Int64
    misses    atomic.Int64
}

// NewMemoryProvider creates a new in-memory cache provider.

@@ -44,8 +46,9 @@ func NewMemoryProvider(opts *Options) *MemoryProvider {
    }

    return &MemoryProvider{
        items:     make(map[string]*memoryItem),
        tagToKeys: make(map[string]map[string]struct{}),
        options:   opts,
    }
}

@@ -114,15 +117,116 @@ func (m *MemoryProvider) Set(ctx context.Context, key string, value []byte, ttl
    return nil
}

// SetWithTags stores a value in the cache with the specified TTL and tags.
func (m *MemoryProvider) SetWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error {
    m.mu.Lock()
    defer m.mu.Unlock()

    if ttl == 0 {
        ttl = m.options.DefaultTTL
    }

    var expiration time.Time
    if ttl > 0 {
        expiration = time.Now().Add(ttl)
    }

    // Check max size and evict if necessary
    if m.options.MaxSize > 0 && len(m.items) >= m.options.MaxSize {
        if _, exists := m.items[key]; !exists {
            m.evictOne()
        }
    }

    // Remove old tag associations if key exists
    if oldItem, exists := m.items[key]; exists {
        for _, tag := range oldItem.Tags {
            if keySet, ok := m.tagToKeys[tag]; ok {
                delete(keySet, key)
                if len(keySet) == 0 {
                    delete(m.tagToKeys, tag)
                }
            }
        }
    }

    // Store the item
    m.items[key] = &memoryItem{
        Value:      value,
        Expiration: expiration,
        LastAccess: time.Now(),
        Tags:       tags,
    }

    // Add new tag associations
    for _, tag := range tags {
        if m.tagToKeys[tag] == nil {
            m.tagToKeys[tag] = make(map[string]struct{})
        }
        m.tagToKeys[tag][key] = struct{}{}
    }

    return nil
}

// Delete removes a key from the cache.
func (m *MemoryProvider) Delete(ctx context.Context, key string) error {
    m.mu.Lock()
    defer m.mu.Unlock()

    // Remove tag associations
    if item, exists := m.items[key]; exists {
        for _, tag := range item.Tags {
            if keySet, ok := m.tagToKeys[tag]; ok {
                delete(keySet, key)
                if len(keySet) == 0 {
                    delete(m.tagToKeys, tag)
                }
            }
        }
    }

    delete(m.items, key)
    return nil
}

// DeleteByTag removes all keys associated with the given tag.
func (m *MemoryProvider) DeleteByTag(ctx context.Context, tag string) error {
    m.mu.Lock()
    defer m.mu.Unlock()

    // Get all keys associated with this tag
    keySet, exists := m.tagToKeys[tag]
    if !exists {
        return nil // No keys with this tag
    }

    // Delete all items with this tag
    for key := range keySet {
        if item, ok := m.items[key]; ok {
            // Remove this tag from the item's tag list
            newTags := make([]string, 0, len(item.Tags))
            for _, t := range item.Tags {
                if t != tag {
                    newTags = append(newTags, t)
                }
            }

            // If item has no more tags, delete it
            // Otherwise update its tags
            if len(newTags) == 0 {
                delete(m.items, key)
            } else {
                item.Tags = newTags
            }
        }
    }

    // Remove the tag mapping
    delete(m.tagToKeys, tag)
    return nil
}

// DeleteByPattern removes all keys matching the pattern.
func (m *MemoryProvider) DeleteByPattern(ctx context.Context, pattern string) error {
    m.mu.Lock()
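A minimal, self-contained sketch of the tag flow against the in-memory provider, in the style of the package's own tests. It assumes UseMemory, Options, GetDefaultCache, SetWithTags, DeleteByTag and Get remain available in the package, as they appear elsewhere in this diff; the test itself is illustrative and not part of the commit:

package cache

import (
    "context"
    "testing"
    "time"
)

// TestTagInvalidationSketch: an entry tagged "table:users" should be gone
// after DeleteByTag on that tag.
func TestTagInvalidationSketch(t *testing.T) {
    UseMemory(&Options{DefaultTTL: time.Minute, MaxSize: 100})
    ctx := context.Background()
    c := GetDefaultCache()

    if err := c.SetWithTags(ctx, "query_total:users", map[string]int{"total": 7},
        time.Minute, []string{"table:users"}); err != nil {
        t.Fatalf("SetWithTags failed: %v", err)
    }

    // Invalidate everything cached for the users table.
    if err := c.DeleteByTag(ctx, "table:users"); err != nil {
        t.Fatalf("DeleteByTag failed: %v", err)
    }

    var out map[string]int
    if err := c.Get(ctx, "query_total:users", &out); err == nil {
        t.Errorf("expected a cache miss after DeleteByTag, got %v", out)
    }
}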
pkg/cache/provider_redis.go (vendored): 86 lines changed
@@ -103,9 +103,93 @@ func (r *RedisProvider) Set(ctx context.Context, key string, value []byte, ttl t
    return r.client.Set(ctx, key, value, ttl).Err()
}

// SetWithTags stores a value in the cache with the specified TTL and tags.
func (r *RedisProvider) SetWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error {
    if ttl == 0 {
        ttl = r.options.DefaultTTL
    }

    pipe := r.client.Pipeline()

    // Set the value
    pipe.Set(ctx, key, value, ttl)

    // Add key to each tag's set
    for _, tag := range tags {
        tagKey := fmt.Sprintf("cache:tag:%s", tag)
        pipe.SAdd(ctx, tagKey, key)
        // Set expiration on tag set (longer than cache items to ensure cleanup)
        if ttl > 0 {
            pipe.Expire(ctx, tagKey, ttl+time.Hour)
        }
    }

    // Store tags for this key for later cleanup
    if len(tags) > 0 {
        tagsKey := fmt.Sprintf("cache:tags:%s", key)
        pipe.SAdd(ctx, tagsKey, tags)
        if ttl > 0 {
            pipe.Expire(ctx, tagsKey, ttl)
        }
    }

    _, err := pipe.Exec(ctx)
    return err
}

// Delete removes a key from the cache.
func (r *RedisProvider) Delete(ctx context.Context, key string) error {
    pipe := r.client.Pipeline()

    // Get tags for this key
    tagsKey := fmt.Sprintf("cache:tags:%s", key)
    tags, err := r.client.SMembers(ctx, tagsKey).Result()
    if err == nil && len(tags) > 0 {
        // Remove key from each tag set
        for _, tag := range tags {
            tagKey := fmt.Sprintf("cache:tag:%s", tag)
            pipe.SRem(ctx, tagKey, key)
        }
        // Delete the tags key
        pipe.Del(ctx, tagsKey)
    }

    // Delete the actual key
    pipe.Del(ctx, key)

    _, err = pipe.Exec(ctx)
    return err
}

// DeleteByTag removes all keys associated with the given tag.
func (r *RedisProvider) DeleteByTag(ctx context.Context, tag string) error {
    tagKey := fmt.Sprintf("cache:tag:%s", tag)

    // Get all keys associated with this tag
    keys, err := r.client.SMembers(ctx, tagKey).Result()
    if err != nil {
        return err
    }

    if len(keys) == 0 {
        return nil
    }

    pipe := r.client.Pipeline()

    // Delete all keys and their tag associations
    for _, key := range keys {
        pipe.Del(ctx, key)
        // Also delete the tags key for this cache key
        tagsKey := fmt.Sprintf("cache:tags:%s", key)
        pipe.Del(ctx, tagsKey)
    }

    // Delete the tag set itself
    pipe.Del(ctx, tagKey)

    _, err = pipe.Exec(ctx)
    return err
}

// DeleteByPattern removes all keys matching the pattern.
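For comparison with the memcache version, the Redis provider keeps the same key names but uses native sets and batches everything through a pipeline. A rough trace of what SetWithTags(ctx, "user:42", data, time.Minute, []string{"users"}) queues, read off the code above rather than captured from a server:

// Approximate commands queued by the pipeline (ttl = 1m):
//   SET    user:42 <data> EX 60
//   SADD   cache:tag:users user:42
//   EXPIRE cache:tag:users 3660        (ttl + 1h, so the tag set outlives its members)
//   SADD   cache:tags:user:42 users
//   EXPIRE cache:tags:user:42 60
//
// DeleteByTag(ctx, "users") then runs SMEMBERS cache:tag:users, DELs every member
// and its cache:tags:<key> set, and finally DELs cache:tag:users itself.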
pkg/cache/query_cache.go (vendored): 127 lines removed (file deleted)
@@ -1,127 +0,0 @@
package cache

import (
    "context"
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "strings"

    "github.com/bitechdev/ResolveSpec/pkg/common"
)

// QueryCacheKey represents the components used to build a cache key for query total count
type QueryCacheKey struct {
    TableName      string                `json:"table_name"`
    Filters        []common.FilterOption `json:"filters"`
    Sort           []common.SortOption   `json:"sort"`
    CustomSQLWhere string                `json:"custom_sql_where,omitempty"`
    CustomSQLOr    string                `json:"custom_sql_or,omitempty"`
    Expand         []ExpandOptionKey     `json:"expand,omitempty"`
    Distinct       bool                  `json:"distinct,omitempty"`
    CursorForward  string                `json:"cursor_forward,omitempty"`
    CursorBackward string                `json:"cursor_backward,omitempty"`
}

// ExpandOptionKey represents expand options for cache key
type ExpandOptionKey struct {
    Relation string `json:"relation"`
    Where    string `json:"where,omitempty"`
}

// BuildQueryCacheKey builds a cache key from query parameters for total count caching
// This is used to cache the total count of records matching a query
func BuildQueryCacheKey(tableName string, filters []common.FilterOption, sort []common.SortOption, customWhere, customOr string) string {
    key := QueryCacheKey{
        TableName:      tableName,
        Filters:        filters,
        Sort:           sort,
        CustomSQLWhere: customWhere,
        CustomSQLOr:    customOr,
    }

    // Serialize to JSON for consistent hashing
    jsonData, err := json.Marshal(key)
    if err != nil {
        // Fallback to simple string concatenation if JSON fails
        return hashString(fmt.Sprintf("%s_%v_%v_%s_%s", tableName, filters, sort, customWhere, customOr))
    }

    return hashString(string(jsonData))
}

// BuildExtendedQueryCacheKey builds a cache key for extended query options (restheadspec)
// Includes expand, distinct, and cursor pagination options
func BuildExtendedQueryCacheKey(tableName string, filters []common.FilterOption, sort []common.SortOption,
    customWhere, customOr string, expandOpts []interface{}, distinct bool, cursorFwd, cursorBwd string) string {

    key := QueryCacheKey{
        TableName:      tableName,
        Filters:        filters,
        Sort:           sort,
        CustomSQLWhere: customWhere,
        CustomSQLOr:    customOr,
        Distinct:       distinct,
        CursorForward:  cursorFwd,
        CursorBackward: cursorBwd,
    }

    // Convert expand options to cache key format
    if len(expandOpts) > 0 {
        key.Expand = make([]ExpandOptionKey, 0, len(expandOpts))
        for _, exp := range expandOpts {
            // Type assert to get the expand option fields we care about for caching
            if expMap, ok := exp.(map[string]interface{}); ok {
                expKey := ExpandOptionKey{}
                if rel, ok := expMap["relation"].(string); ok {
                    expKey.Relation = rel
                }
                if where, ok := expMap["where"].(string); ok {
                    expKey.Where = where
                }
                key.Expand = append(key.Expand, expKey)
            }
        }
        // Sort expand options for consistent hashing (already sorted by relation name above)
    }

    // Serialize to JSON for consistent hashing
    jsonData, err := json.Marshal(key)
    if err != nil {
        // Fallback to simple string concatenation if JSON fails
        return hashString(fmt.Sprintf("%s_%v_%v_%s_%s_%v_%v_%s_%s",
            tableName, filters, sort, customWhere, customOr, expandOpts, distinct, cursorFwd, cursorBwd))
    }

    return hashString(string(jsonData))
}

// hashString computes SHA256 hash of a string
func hashString(s string) string {
    h := sha256.New()
    h.Write([]byte(s))
    return hex.EncodeToString(h.Sum(nil))
}

// GetQueryTotalCacheKey returns a formatted cache key for storing/retrieving total count
func GetQueryTotalCacheKey(hash string) string {
    return fmt.Sprintf("query_total:%s", hash)
}

// CachedTotal represents a cached total count
type CachedTotal struct {
    Total int `json:"total"`
}

// InvalidateCacheForTable removes all cached totals for a specific table
// This should be called when data in the table changes (insert/update/delete)
func InvalidateCacheForTable(ctx context.Context, tableName string) error {
    cache := GetDefaultCache()

    // Build a pattern to match all query totals for this table
    // Note: This requires pattern matching support in the provider
    pattern := fmt.Sprintf("query_total:*%s*", strings.ToLower(tableName))

    return cache.DeleteByPattern(ctx, pattern)
}
pkg/cache/query_cache_test.go (vendored): 151 lines removed (file deleted)
@@ -1,151 +0,0 @@
package cache

import (
    "context"
    "testing"
    "time"

    "github.com/bitechdev/ResolveSpec/pkg/common"
)

func TestBuildQueryCacheKey(t *testing.T) {
    filters := []common.FilterOption{
        {Column: "name", Operator: "eq", Value: "test"},
        {Column: "age", Operator: "gt", Value: 25},
    }
    sorts := []common.SortOption{
        {Column: "name", Direction: "asc"},
    }

    // Generate cache key
    key1 := BuildQueryCacheKey("users", filters, sorts, "status = 'active'", "")

    // Same parameters should generate same key
    key2 := BuildQueryCacheKey("users", filters, sorts, "status = 'active'", "")

    if key1 != key2 {
        t.Errorf("Expected same cache keys for identical parameters, got %s and %s", key1, key2)
    }

    // Different parameters should generate different key
    key3 := BuildQueryCacheKey("users", filters, sorts, "status = 'inactive'", "")

    if key1 == key3 {
        t.Errorf("Expected different cache keys for different parameters, got %s and %s", key1, key3)
    }
}

func TestBuildExtendedQueryCacheKey(t *testing.T) {
    filters := []common.FilterOption{
        {Column: "name", Operator: "eq", Value: "test"},
    }
    sorts := []common.SortOption{
        {Column: "name", Direction: "asc"},
    }
    expandOpts := []interface{}{
        map[string]interface{}{
            "relation": "posts",
            "where":    "status = 'published'",
        },
    }

    // Generate cache key
    key1 := BuildExtendedQueryCacheKey("users", filters, sorts, "", "", expandOpts, false, "", "")

    // Same parameters should generate same key
    key2 := BuildExtendedQueryCacheKey("users", filters, sorts, "", "", expandOpts, false, "", "")

    if key1 != key2 {
        t.Errorf("Expected same cache keys for identical parameters")
    }

    // Different distinct value should generate different key
    key3 := BuildExtendedQueryCacheKey("users", filters, sorts, "", "", expandOpts, true, "", "")

    if key1 == key3 {
        t.Errorf("Expected different cache keys for different distinct values")
    }
}

func TestGetQueryTotalCacheKey(t *testing.T) {
    hash := "abc123"
    key := GetQueryTotalCacheKey(hash)

    expected := "query_total:abc123"
    if key != expected {
        t.Errorf("Expected %s, got %s", expected, key)
    }
}

func TestCachedTotalIntegration(t *testing.T) {
    // Initialize cache with memory provider for testing
    UseMemory(&Options{
        DefaultTTL: 1 * time.Minute,
        MaxSize:    100,
    })

    ctx := context.Background()

    // Create test data
    filters := []common.FilterOption{
        {Column: "status", Operator: "eq", Value: "active"},
    }
    sorts := []common.SortOption{
        {Column: "created_at", Direction: "desc"},
    }

    // Build cache key
    cacheKeyHash := BuildQueryCacheKey("test_table", filters, sorts, "", "")
    cacheKey := GetQueryTotalCacheKey(cacheKeyHash)

    // Store a total count in cache
    totalToCache := CachedTotal{Total: 42}
    err := GetDefaultCache().Set(ctx, cacheKey, totalToCache, time.Minute)
    if err != nil {
        t.Fatalf("Failed to set cache: %v", err)
    }

    // Retrieve from cache
    var cachedTotal CachedTotal
    err = GetDefaultCache().Get(ctx, cacheKey, &cachedTotal)
    if err != nil {
        t.Fatalf("Failed to get from cache: %v", err)
    }

    if cachedTotal.Total != 42 {
        t.Errorf("Expected total 42, got %d", cachedTotal.Total)
    }

    // Test cache miss
    nonExistentKey := GetQueryTotalCacheKey("nonexistent")
    var missedTotal CachedTotal
    err = GetDefaultCache().Get(ctx, nonExistentKey, &missedTotal)
    if err == nil {
        t.Errorf("Expected error for cache miss, got nil")
    }
}

func TestHashString(t *testing.T) {
    input1 := "test string"
    input2 := "test string"
    input3 := "different string"

    hash1 := hashString(input1)
    hash2 := hashString(input2)
    hash3 := hashString(input3)

    // Same input should produce same hash
    if hash1 != hash2 {
        t.Errorf("Expected same hash for identical inputs")
    }

    // Different input should produce different hash
    if hash1 == hash3 {
        t.Errorf("Expected different hash for different inputs")
    }

    // Hash should be hex encoded SHA256 (64 characters)
    if len(hash1) != 64 {
        t.Errorf("Expected hash length of 64, got %d", len(hash1))
    }
}