Mirror of https://github.com/bitechdev/ResolveSpec.git (synced 2025-12-30 00:04:25 +00:00)

Compare commits: 1 commit, 02fbdbd651
pkg/cache/cache_manager.go (vendored, 20 lines changed)

@@ -57,11 +57,31 @@ func (c *Cache) SetBytes(ctx context.Context, key string, value []byte, ttl time
 	return c.provider.Set(ctx, key, value, ttl)
 }
 
+// SetWithTags serializes and stores a value in the cache with the specified TTL and tags.
+func (c *Cache) SetWithTags(ctx context.Context, key string, value interface{}, ttl time.Duration, tags []string) error {
+	data, err := json.Marshal(value)
+	if err != nil {
+		return fmt.Errorf("failed to serialize: %w", err)
+	}
+
+	return c.provider.SetWithTags(ctx, key, data, ttl, tags)
+}
+
+// SetBytesWithTags stores raw bytes in the cache with the specified TTL and tags.
+func (c *Cache) SetBytesWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error {
+	return c.provider.SetWithTags(ctx, key, value, ttl, tags)
+}
+
 // Delete removes a key from the cache.
 func (c *Cache) Delete(ctx context.Context, key string) error {
 	return c.provider.Delete(ctx, key)
 }
 
+// DeleteByTag removes all keys associated with the given tag.
+func (c *Cache) DeleteByTag(ctx context.Context, tag string) error {
+	return c.provider.DeleteByTag(ctx, tag)
+}
+
 // DeleteByPattern removes all keys matching the pattern.
 func (c *Cache) DeleteByPattern(ctx context.Context, pattern string) error {
 	return c.provider.DeleteByPattern(ctx, pattern)
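
With these facade methods the cache supports tag-scoped invalidation end to end: serialize-and-store with tags, then drop a whole group with one call. A minimal usage sketch (the User type, key name, tag, and TTL are made up; UseMemory, Options, GetDefaultCache, and the method signatures appear elsewhere in this commit's cache package code):

package main

import (
	"context"
	"time"

	"github.com/bitechdev/ResolveSpec/pkg/cache"
)

// User is a stand-in payload type for this sketch.
type User struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
}

func main() {
	ctx := context.Background()

	// Back the default cache with the in-memory provider.
	cache.UseMemory(&cache.Options{DefaultTTL: time.Minute, MaxSize: 100})
	c := cache.GetDefaultCache()

	// SetWithTags JSON-encodes the value and indexes it under its tags.
	users := []User{{ID: 1, Name: "Ada"}}
	_ = c.SetWithTags(ctx, "users:list", users, 2*time.Minute, []string{"table:users"})

	// After any write to the users table, one call drops every entry
	// stored with the "table:users" tag.
	_ = c.DeleteByTag(ctx, "table:users")
}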
pkg/cache/provider.go (vendored, 8 lines changed)

@@ -15,9 +15,17 @@ type Provider interface {
 	// If ttl is 0, the item never expires.
 	Set(ctx context.Context, key string, value []byte, ttl time.Duration) error
 
+	// SetWithTags stores a value in the cache with the specified TTL and tags.
+	// Tags can be used to invalidate groups of related keys.
+	// If ttl is 0, the item never expires.
+	SetWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error
+
 	// Delete removes a key from the cache.
 	Delete(ctx context.Context, key string) error
 
+	// DeleteByTag removes all keys associated with the given tag.
+	DeleteByTag(ctx context.Context, tag string) error
+
 	// DeleteByPattern removes all keys matching the pattern.
 	// Pattern syntax depends on the provider implementation.
 	DeleteByPattern(ctx context.Context, pattern string) error
pkg/cache/provider_memcache.go (vendored, 140 lines changed)

@@ -2,6 +2,7 @@ package cache
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"time"
 
@@ -97,8 +98,115 @@ func (m *MemcacheProvider) Set(ctx context.Context, key string, value []byte, tt
 	return m.client.Set(item)
 }
 
+// SetWithTags stores a value in the cache with the specified TTL and tags.
+// Note: Tag support in Memcache is limited and less efficient than Redis.
+func (m *MemcacheProvider) SetWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error {
+	if ttl == 0 {
+		ttl = m.options.DefaultTTL
+	}
+
+	expiration := int32(ttl.Seconds())
+
+	// Set the main value
+	item := &memcache.Item{
+		Key:        key,
+		Value:      value,
+		Expiration: expiration,
+	}
+	if err := m.client.Set(item); err != nil {
+		return err
+	}
+
+	// Store tags for this key
+	if len(tags) > 0 {
+		tagsData, err := json.Marshal(tags)
+		if err != nil {
+			return fmt.Errorf("failed to marshal tags: %w", err)
+		}
+
+		tagsItem := &memcache.Item{
+			Key:        fmt.Sprintf("cache:tags:%s", key),
+			Value:      tagsData,
+			Expiration: expiration,
+		}
+		if err := m.client.Set(tagsItem); err != nil {
+			return err
+		}
+
+		// Add key to each tag's key list
+		for _, tag := range tags {
+			tagKey := fmt.Sprintf("cache:tag:%s", tag)
+
+			// Get existing keys for this tag
+			var keys []string
+			if item, err := m.client.Get(tagKey); err == nil {
+				_ = json.Unmarshal(item.Value, &keys)
+			}
+
+			// Add current key if not already present
+			found := false
+			for _, k := range keys {
+				if k == key {
+					found = true
+					break
+				}
+			}
+			if !found {
+				keys = append(keys, key)
+			}
+
+			// Store updated key list
+			keysData, err := json.Marshal(keys)
+			if err != nil {
+				continue
+			}
+
+			tagItem := &memcache.Item{
+				Key:        tagKey,
+				Value:      keysData,
+				Expiration: expiration + 3600, // Give tag lists longer TTL
+			}
+			_ = m.client.Set(tagItem)
+		}
+	}
+
+	return nil
+}
+
 // Delete removes a key from the cache.
 func (m *MemcacheProvider) Delete(ctx context.Context, key string) error {
+	// Get tags for this key
+	tagsKey := fmt.Sprintf("cache:tags:%s", key)
+	if item, err := m.client.Get(tagsKey); err == nil {
+		var tags []string
+		if err := json.Unmarshal(item.Value, &tags); err == nil {
+			// Remove key from each tag's key list
+			for _, tag := range tags {
+				tagKey := fmt.Sprintf("cache:tag:%s", tag)
+				if tagItem, err := m.client.Get(tagKey); err == nil {
+					var keys []string
+					if err := json.Unmarshal(tagItem.Value, &keys); err == nil {
+						// Remove current key from the list
+						newKeys := make([]string, 0, len(keys))
+						for _, k := range keys {
+							if k != key {
+								newKeys = append(newKeys, k)
+							}
+						}
+						// Update the tag's key list
+						if keysData, err := json.Marshal(newKeys); err == nil {
+							tagItem.Value = keysData
+							_ = m.client.Set(tagItem)
+						}
+					}
+				}
+			}
+		}
+		// Delete the tags key
+		_ = m.client.Delete(tagsKey)
+	}
+
+	// Delete the actual key
 	err := m.client.Delete(key)
 	if err == memcache.ErrCacheMiss {
 		return nil
@@ -106,6 +214,38 @@ func (m *MemcacheProvider) Delete(ctx context.Context, key string) error {
 	return err
 }
 
+// DeleteByTag removes all keys associated with the given tag.
+func (m *MemcacheProvider) DeleteByTag(ctx context.Context, tag string) error {
+	tagKey := fmt.Sprintf("cache:tag:%s", tag)
+
+	// Get all keys associated with this tag
+	item, err := m.client.Get(tagKey)
+	if err == memcache.ErrCacheMiss {
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	var keys []string
+	if err := json.Unmarshal(item.Value, &keys); err != nil {
+		return fmt.Errorf("failed to unmarshal tag keys: %w", err)
+	}
+
+	// Delete all keys
+	for _, key := range keys {
+		_ = m.client.Delete(key)
+		// Also delete the tags key for this cache key
+		tagsKey := fmt.Sprintf("cache:tags:%s", key)
+		_ = m.client.Delete(tagsKey)
+	}
+
+	// Delete the tag key itself
+	_ = m.client.Delete(tagKey)
+
+	return nil
+}
+
 // DeleteByPattern removes all keys matching the pattern.
 // Note: Memcache does not support pattern-based deletion natively.
 // This is a no-op for memcache and returns an error.
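
Since memcached has no secondary indexes, the provider emulates tags with two JSON bookkeeping entries per tagged value, which Delete and DeleteByTag then walk. Roughly, after SetWithTags(ctx, "user:42", data, ttl, []string{"table:users"}) the store holds (key prefixes come from the code above; the concrete key and tag are illustrative):

    user:42               -> raw value bytes   (expires after ttl)
    cache:tags:user:42    -> ["table:users"]   (expires after ttl)
    cache:tag:table:users -> ["user:42", ...]  (expires after ttl + 3600s)

The per-tag key list is updated with a plain get/modify/set rather than a compare-and-swap, consistent with the note above that memcached tag support is limited and less efficient than Redis.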
pkg/cache/provider_memory.go (vendored, 118 lines changed)

@@ -15,6 +15,7 @@ type memoryItem struct {
 	Expiration time.Time
 	LastAccess time.Time
 	HitCount   int64
+	Tags       []string
 }
 
 // isExpired checks if the item has expired.
@@ -27,11 +28,12 @@ func (m *memoryItem) isExpired() bool {
 
 // MemoryProvider is an in-memory implementation of the Provider interface.
 type MemoryProvider struct {
 	mu    sync.RWMutex
 	items map[string]*memoryItem
+	tagToKeys map[string]map[string]struct{} // tag -> set of keys
 	options *Options
 	hits    atomic.Int64
 	misses  atomic.Int64
 }
 
 // NewMemoryProvider creates a new in-memory cache provider.
@@ -44,8 +46,9 @@ func NewMemoryProvider(opts *Options) *MemoryProvider {
 	}
 
 	return &MemoryProvider{
 		items: make(map[string]*memoryItem),
+		tagToKeys: make(map[string]map[string]struct{}),
 		options: opts,
 	}
 }
 
@@ -114,15 +117,116 @@ func (m *MemoryProvider) Set(ctx context.Context, key string, value []byte, ttl
 	return nil
 }
 
+// SetWithTags stores a value in the cache with the specified TTL and tags.
+func (m *MemoryProvider) SetWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	if ttl == 0 {
+		ttl = m.options.DefaultTTL
+	}
+
+	var expiration time.Time
+	if ttl > 0 {
+		expiration = time.Now().Add(ttl)
+	}
+
+	// Check max size and evict if necessary
+	if m.options.MaxSize > 0 && len(m.items) >= m.options.MaxSize {
+		if _, exists := m.items[key]; !exists {
+			m.evictOne()
+		}
+	}
+
+	// Remove old tag associations if key exists
+	if oldItem, exists := m.items[key]; exists {
+		for _, tag := range oldItem.Tags {
+			if keySet, ok := m.tagToKeys[tag]; ok {
+				delete(keySet, key)
+				if len(keySet) == 0 {
+					delete(m.tagToKeys, tag)
+				}
+			}
+		}
+	}
+
+	// Store the item
+	m.items[key] = &memoryItem{
+		Value:      value,
+		Expiration: expiration,
+		LastAccess: time.Now(),
+		Tags:       tags,
+	}
+
+	// Add new tag associations
+	for _, tag := range tags {
+		if m.tagToKeys[tag] == nil {
+			m.tagToKeys[tag] = make(map[string]struct{})
+		}
+		m.tagToKeys[tag][key] = struct{}{}
+	}
+
+	return nil
+}
+
 // Delete removes a key from the cache.
 func (m *MemoryProvider) Delete(ctx context.Context, key string) error {
 	m.mu.Lock()
 	defer m.mu.Unlock()
 
+	// Remove tag associations
+	if item, exists := m.items[key]; exists {
+		for _, tag := range item.Tags {
+			if keySet, ok := m.tagToKeys[tag]; ok {
+				delete(keySet, key)
+				if len(keySet) == 0 {
+					delete(m.tagToKeys, tag)
+				}
+			}
+		}
+	}
+
 	delete(m.items, key)
 	return nil
 }
 
+// DeleteByTag removes all keys associated with the given tag.
+func (m *MemoryProvider) DeleteByTag(ctx context.Context, tag string) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	// Get all keys associated with this tag
+	keySet, exists := m.tagToKeys[tag]
+	if !exists {
+		return nil // No keys with this tag
+	}
+
+	// Delete all items with this tag
+	for key := range keySet {
+		if item, ok := m.items[key]; ok {
+			// Remove this tag from the item's tag list
+			newTags := make([]string, 0, len(item.Tags))
+			for _, t := range item.Tags {
+				if t != tag {
+					newTags = append(newTags, t)
+				}
+			}
+
+			// If item has no more tags, delete it
+			// Otherwise update its tags
+			if len(newTags) == 0 {
+				delete(m.items, key)
+			} else {
+				item.Tags = newTags
+			}
+		}
+	}
+
+	// Remove the tag mapping
+	delete(m.tagToKeys, tag)
+	return nil
+}
+
 // DeleteByPattern removes all keys matching the pattern.
 func (m *MemoryProvider) DeleteByPattern(ctx context.Context, pattern string) error {
 	m.mu.Lock()
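
The memory provider's tag support is a reverse index, tag to set of keys, maintained under the same mutex as the items map; DeleteByTag walks that set and removes an item outright only when the deleted tag was its last one. A standalone sketch of the bookkeeping, outside the provider and for illustration only:

package main

import "fmt"

func main() {
	// tag -> set of keys, mirroring MemoryProvider.tagToKeys.
	tagToKeys := map[string]map[string]struct{}{}

	add := func(key string, tags ...string) {
		for _, t := range tags {
			if tagToKeys[t] == nil {
				tagToKeys[t] = make(map[string]struct{})
			}
			tagToKeys[t][key] = struct{}{}
		}
	}

	add("users:list", "table:users", "schema:public")
	add("users:count", "table:users")

	// DeleteByTag("table:users") would visit exactly these keys.
	for key := range tagToKeys["table:users"] {
		fmt.Println("would invalidate:", key)
	}
}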
pkg/cache/provider_redis.go (vendored, 86 lines changed)

@@ -103,9 +103,93 @@ func (r *RedisProvider) Set(ctx context.Context, key string, value []byte, ttl t
 	return r.client.Set(ctx, key, value, ttl).Err()
 }
 
+// SetWithTags stores a value in the cache with the specified TTL and tags.
+func (r *RedisProvider) SetWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error {
+	if ttl == 0 {
+		ttl = r.options.DefaultTTL
+	}
+
+	pipe := r.client.Pipeline()
+
+	// Set the value
+	pipe.Set(ctx, key, value, ttl)
+
+	// Add key to each tag's set
+	for _, tag := range tags {
+		tagKey := fmt.Sprintf("cache:tag:%s", tag)
+		pipe.SAdd(ctx, tagKey, key)
+		// Set expiration on tag set (longer than cache items to ensure cleanup)
+		if ttl > 0 {
+			pipe.Expire(ctx, tagKey, ttl+time.Hour)
+		}
+	}
+
+	// Store tags for this key for later cleanup
+	if len(tags) > 0 {
+		tagsKey := fmt.Sprintf("cache:tags:%s", key)
+		pipe.SAdd(ctx, tagsKey, tags)
+		if ttl > 0 {
+			pipe.Expire(ctx, tagsKey, ttl)
+		}
+	}
+
+	_, err := pipe.Exec(ctx)
+	return err
+}
+
 // Delete removes a key from the cache.
 func (r *RedisProvider) Delete(ctx context.Context, key string) error {
-	return r.client.Del(ctx, key).Err()
+	pipe := r.client.Pipeline()
+
+	// Get tags for this key
+	tagsKey := fmt.Sprintf("cache:tags:%s", key)
+	tags, err := r.client.SMembers(ctx, tagsKey).Result()
+	if err == nil && len(tags) > 0 {
+		// Remove key from each tag set
+		for _, tag := range tags {
+			tagKey := fmt.Sprintf("cache:tag:%s", tag)
+			pipe.SRem(ctx, tagKey, key)
+		}
+		// Delete the tags key
+		pipe.Del(ctx, tagsKey)
+	}
+
+	// Delete the actual key
+	pipe.Del(ctx, key)
+
+	_, err = pipe.Exec(ctx)
+	return err
+}
+
+// DeleteByTag removes all keys associated with the given tag.
+func (r *RedisProvider) DeleteByTag(ctx context.Context, tag string) error {
+	tagKey := fmt.Sprintf("cache:tag:%s", tag)
+
+	// Get all keys associated with this tag
+	keys, err := r.client.SMembers(ctx, tagKey).Result()
+	if err != nil {
+		return err
+	}
+
+	if len(keys) == 0 {
+		return nil
+	}
+
+	pipe := r.client.Pipeline()
+
+	// Delete all keys and their tag associations
+	for _, key := range keys {
+		pipe.Del(ctx, key)
+		// Also delete the tags key for this cache key
+		tagsKey := fmt.Sprintf("cache:tags:%s", key)
+		pipe.Del(ctx, tagsKey)
+	}
+
+	// Delete the tag set itself
+	pipe.Del(ctx, tagKey)
+
+	_, err = pipe.Exec(ctx)
+	return err
 }
 
 // DeleteByPattern removes all keys matching the pattern.
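
On Redis the same bookkeeping maps onto native sets issued through one pipeline. For example, SetWithTags(ctx, "user:42", data, ttl, []string{"table:users"}) with a non-zero ttl queues roughly the following commands (derived from the pipeline calls above; the key and tag names are illustrative):

    SET    user:42 <data> EX <ttl>
    SADD   cache:tag:table:users user:42
    EXPIRE cache:tag:table:users <ttl + 1h>
    SADD   cache:tags:user:42 table:users
    EXPIRE cache:tags:user:42 <ttl>

DeleteByTag then reads the tag set with SMEMBERS and pipelines a DEL for every member, for each member's cache:tags:* entry, and finally for the tag set itself.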
pkg/cache/query_cache_test.go (vendored, 151 lines; file deleted)

@@ -1,151 +0,0 @@
-package cache
-
-import (
-	"context"
-	"testing"
-	"time"
-
-	"github.com/bitechdev/ResolveSpec/pkg/common"
-)
-
-func TestBuildQueryCacheKey(t *testing.T) {
-	filters := []common.FilterOption{
-		{Column: "name", Operator: "eq", Value: "test"},
-		{Column: "age", Operator: "gt", Value: 25},
-	}
-	sorts := []common.SortOption{
-		{Column: "name", Direction: "asc"},
-	}
-
-	// Generate cache key
-	key1 := BuildQueryCacheKey("users", filters, sorts, "status = 'active'", "")
-
-	// Same parameters should generate same key
-	key2 := BuildQueryCacheKey("users", filters, sorts, "status = 'active'", "")
-
-	if key1 != key2 {
-		t.Errorf("Expected same cache keys for identical parameters, got %s and %s", key1, key2)
-	}
-
-	// Different parameters should generate different key
-	key3 := BuildQueryCacheKey("users", filters, sorts, "status = 'inactive'", "")
-
-	if key1 == key3 {
-		t.Errorf("Expected different cache keys for different parameters, got %s and %s", key1, key3)
-	}
-}
-
-func TestBuildExtendedQueryCacheKey(t *testing.T) {
-	filters := []common.FilterOption{
-		{Column: "name", Operator: "eq", Value: "test"},
-	}
-	sorts := []common.SortOption{
-		{Column: "name", Direction: "asc"},
-	}
-	expandOpts := []interface{}{
-		map[string]interface{}{
-			"relation": "posts",
-			"where":    "status = 'published'",
-		},
-	}
-
-	// Generate cache key
-	key1 := BuildExtendedQueryCacheKey("users", filters, sorts, "", "", expandOpts, false, "", "")
-
-	// Same parameters should generate same key
-	key2 := BuildExtendedQueryCacheKey("users", filters, sorts, "", "", expandOpts, false, "", "")
-
-	if key1 != key2 {
-		t.Errorf("Expected same cache keys for identical parameters")
-	}
-
-	// Different distinct value should generate different key
-	key3 := BuildExtendedQueryCacheKey("users", filters, sorts, "", "", expandOpts, true, "", "")
-
-	if key1 == key3 {
-		t.Errorf("Expected different cache keys for different distinct values")
-	}
-}
-
-func TestGetQueryTotalCacheKey(t *testing.T) {
-	hash := "abc123"
-	key := GetQueryTotalCacheKey(hash)
-
-	expected := "query_total:abc123"
-	if key != expected {
-		t.Errorf("Expected %s, got %s", expected, key)
-	}
-}
-
-func TestCachedTotalIntegration(t *testing.T) {
-	// Initialize cache with memory provider for testing
-	UseMemory(&Options{
-		DefaultTTL: 1 * time.Minute,
-		MaxSize:    100,
-	})
-
-	ctx := context.Background()
-
-	// Create test data
-	filters := []common.FilterOption{
-		{Column: "status", Operator: "eq", Value: "active"},
-	}
-	sorts := []common.SortOption{
-		{Column: "created_at", Direction: "desc"},
-	}
-
-	// Build cache key
-	cacheKeyHash := BuildQueryCacheKey("test_table", filters, sorts, "", "")
-	cacheKey := GetQueryTotalCacheKey(cacheKeyHash)
-
-	// Store a total count in cache
-	totalToCache := CachedTotal{Total: 42}
-	err := GetDefaultCache().Set(ctx, cacheKey, totalToCache, time.Minute)
-	if err != nil {
-		t.Fatalf("Failed to set cache: %v", err)
-	}
-
-	// Retrieve from cache
-	var cachedTotal CachedTotal
-	err = GetDefaultCache().Get(ctx, cacheKey, &cachedTotal)
-	if err != nil {
-		t.Fatalf("Failed to get from cache: %v", err)
-	}
-
-	if cachedTotal.Total != 42 {
-		t.Errorf("Expected total 42, got %d", cachedTotal.Total)
-	}
-
-	// Test cache miss
-	nonExistentKey := GetQueryTotalCacheKey("nonexistent")
-	var missedTotal CachedTotal
-	err = GetDefaultCache().Get(ctx, nonExistentKey, &missedTotal)
-	if err == nil {
-		t.Errorf("Expected error for cache miss, got nil")
-	}
-}
-
-func TestHashString(t *testing.T) {
-	input1 := "test string"
-	input2 := "test string"
-	input3 := "different string"
-
-	hash1 := hashString(input1)
-	hash2 := hashString(input2)
-	hash3 := hashString(input3)
-
-	// Same input should produce same hash
-	if hash1 != hash2 {
-		t.Errorf("Expected same hash for identical inputs")
-	}
-
-	// Different input should produce different hash
-	if hash1 == hash3 {
-		t.Errorf("Expected different hash for different inputs")
-	}
-
-	// Hash should be hex encoded SHA256 (64 characters)
-	if len(hash1) != 64 {
-		t.Errorf("Expected hash length of 64, got %d", len(hash1))
-	}
-}
pkg/resolvespec/cache_helpers.go (new file, 118 lines)

@@ -0,0 +1,118 @@
+package resolvespec
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/bitechdev/ResolveSpec/pkg/cache"
+	"github.com/bitechdev/ResolveSpec/pkg/common"
+)
+
+// queryCacheKey represents the components used to build a cache key for query total count
+type queryCacheKey struct {
+	TableName      string                `json:"table_name"`
+	Filters        []common.FilterOption `json:"filters"`
+	Sort           []common.SortOption   `json:"sort"`
+	CustomSQLWhere string                `json:"custom_sql_where,omitempty"`
+	CustomSQLOr    string                `json:"custom_sql_or,omitempty"`
+	CursorForward  string                `json:"cursor_forward,omitempty"`
+	CursorBackward string                `json:"cursor_backward,omitempty"`
+}
+
+// cachedTotal represents a cached total count
+type cachedTotal struct {
+	Total int `json:"total"`
+}
+
+// buildQueryCacheKey builds a cache key from query parameters for total count caching
+func buildQueryCacheKey(tableName string, filters []common.FilterOption, sort []common.SortOption, customWhere, customOr string) string {
+	key := queryCacheKey{
+		TableName:      tableName,
+		Filters:        filters,
+		Sort:           sort,
+		CustomSQLWhere: customWhere,
+		CustomSQLOr:    customOr,
+	}
+
+	// Serialize to JSON for consistent hashing
+	jsonData, err := json.Marshal(key)
+	if err != nil {
+		// Fallback to simple string concatenation if JSON fails
+		return hashString(fmt.Sprintf("%s_%v_%v_%s_%s", tableName, filters, sort, customWhere, customOr))
+	}
+
+	return hashString(string(jsonData))
+}
+
+// buildExtendedQueryCacheKey builds a cache key for extended query options with cursor pagination
+func buildExtendedQueryCacheKey(tableName string, filters []common.FilterOption, sort []common.SortOption,
+	customWhere, customOr string, cursorFwd, cursorBwd string) string {
+
+	key := queryCacheKey{
+		TableName:      tableName,
+		Filters:        filters,
+		Sort:           sort,
+		CustomSQLWhere: customWhere,
+		CustomSQLOr:    customOr,
+		CursorForward:  cursorFwd,
+		CursorBackward: cursorBwd,
+	}
+
+	// Serialize to JSON for consistent hashing
+	jsonData, err := json.Marshal(key)
+	if err != nil {
+		// Fallback to simple string concatenation if JSON fails
+		return hashString(fmt.Sprintf("%s_%v_%v_%s_%s_%s_%s",
+			tableName, filters, sort, customWhere, customOr, cursorFwd, cursorBwd))
+	}
+
+	return hashString(string(jsonData))
+}
+
+// hashString computes SHA256 hash of a string
+func hashString(s string) string {
+	h := sha256.New()
+	h.Write([]byte(s))
+	return hex.EncodeToString(h.Sum(nil))
+}
+
+// getQueryTotalCacheKey returns a formatted cache key for storing/retrieving total count
+func getQueryTotalCacheKey(hash string) string {
+	return fmt.Sprintf("query_total:%s", hash)
+}
+
+// buildCacheTags creates cache tags from schema and table name
+func buildCacheTags(schema, tableName string) []string {
+	return []string{
+		fmt.Sprintf("schema:%s", strings.ToLower(schema)),
+		fmt.Sprintf("table:%s", strings.ToLower(tableName)),
+	}
+}
+
+// setQueryTotalCache stores a query total in the cache with schema and table tags
+func setQueryTotalCache(ctx context.Context, cacheKey string, total int, schema, tableName string, ttl time.Duration) error {
+	c := cache.GetDefaultCache()
+	cacheData := cachedTotal{Total: total}
+	tags := buildCacheTags(schema, tableName)
+
+	return c.SetWithTags(ctx, cacheKey, cacheData, ttl, tags)
+}
+
+// invalidateCacheForTags removes all cached items matching the specified tags
+func invalidateCacheForTags(ctx context.Context, tags []string) error {
+	c := cache.GetDefaultCache()
+
+	// Invalidate for each tag
+	for _, tag := range tags {
+		if err := c.DeleteByTag(ctx, tag); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
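
Taken together, these helpers give the handler a small, repeatable flow: hash the query shape, namespace the hash, store the count under schema/table tags, and later invalidate by those same tags. A sketch of that flow (it would have to sit inside package resolvespec since the helpers are unexported; the filter, sort, schema, and table values are made up):

func exampleQueryTotalCaching(ctx context.Context) error {
	filters := []common.FilterOption{{Column: "status", Operator: "eq", Value: "active"}}
	sorts := []common.SortOption{{Column: "created_at", Direction: "desc"}}

	// 1. Hash the query shape and derive the namespaced cache key.
	hash := buildQueryCacheKey("users", filters, sorts, "", "")
	key := getQueryTotalCacheKey(hash) // "query_total:<sha256 hex>"

	// 2. Store a total of 42 for two minutes, tagged schema:public and table:users.
	if err := setQueryTotalCache(ctx, key, 42, "public", "users", 2*time.Minute); err != nil {
		return err
	}

	// 3. After a write to public.users, drop everything carrying those tags.
	return invalidateCacheForTags(ctx, buildCacheTags("public", "users"))
}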
(hunks in the resolvespec handler)

@@ -331,19 +331,17 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
 	// Use extended cache key if cursors are present
 	var cacheKeyHash string
 	if len(options.CursorForward) > 0 || len(options.CursorBackward) > 0 {
-		cacheKeyHash = cache.BuildExtendedQueryCacheKey(
+		cacheKeyHash = buildExtendedQueryCacheKey(
 			tableName,
 			options.Filters,
 			options.Sort,
 			"", // No custom SQL WHERE in resolvespec
 			"", // No custom SQL OR in resolvespec
-			nil,   // No expand options in resolvespec
-			false, // distinct not used here
 			options.CursorForward,
 			options.CursorBackward,
 		)
 	} else {
-		cacheKeyHash = cache.BuildQueryCacheKey(
+		cacheKeyHash = buildQueryCacheKey(
 			tableName,
 			options.Filters,
 			options.Sort,
@@ -351,10 +349,10 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
 			"", // No custom SQL OR in resolvespec
 		)
 	}
-	cacheKey := cache.GetQueryTotalCacheKey(cacheKeyHash)
+	cacheKey := getQueryTotalCacheKey(cacheKeyHash)
 
 	// Try to retrieve from cache
-	var cachedTotal cache.CachedTotal
+	var cachedTotal cachedTotal
 	err := cache.GetDefaultCache().Get(ctx, cacheKey, &cachedTotal)
 	if err == nil {
 		total = cachedTotal.Total
@@ -371,10 +369,9 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
 		total = count
 		logger.Debug("Total records (from query): %d", total)
 
-		// Store in cache
+		// Store in cache with schema and table tags
 		cacheTTL := time.Minute * 2 // Default 2 minutes TTL
-		cacheData := cache.CachedTotal{Total: total}
-		if err := cache.GetDefaultCache().Set(ctx, cacheKey, cacheData, cacheTTL); err != nil {
+		if err := setQueryTotalCache(ctx, cacheKey, total, schema, tableName, cacheTTL); err != nil {
 			logger.Warn("Failed to cache query total: %v", err)
 			// Don't fail the request if caching fails
 		} else {
@@ -464,6 +461,11 @@ func (h *Handler) handleCreate(ctx context.Context, w common.ResponseWriter, dat
 			return
 		}
 		logger.Info("Successfully created record with nested data, ID: %v", result.ID)
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, result.Data, nil)
 		return
 	}
@@ -480,6 +482,11 @@ func (h *Handler) handleCreate(ctx context.Context, w common.ResponseWriter, dat
 			return
 		}
 		logger.Info("Successfully created record, rows affected: %d", result.RowsAffected())
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, v, nil)
 
 	case []map[string]interface{}:
@@ -518,6 +525,11 @@ func (h *Handler) handleCreate(ctx context.Context, w common.ResponseWriter, dat
 			return
 		}
 		logger.Info("Successfully created %d records with nested data", len(results))
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, results, nil)
 		return
 	}
@@ -541,6 +553,11 @@ func (h *Handler) handleCreate(ctx context.Context, w common.ResponseWriter, dat
 			return
 		}
 		logger.Info("Successfully created %d records", len(v))
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, v, nil)
 
 	case []interface{}:
@@ -584,6 +601,11 @@ func (h *Handler) handleCreate(ctx context.Context, w common.ResponseWriter, dat
 			return
 		}
 		logger.Info("Successfully created %d records with nested data", len(results))
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, results, nil)
 		return
 	}
@@ -611,6 +633,11 @@ func (h *Handler) handleCreate(ctx context.Context, w common.ResponseWriter, dat
 			return
 		}
 		logger.Info("Successfully created %d records", len(v))
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, list, nil)
 
 	default:
@@ -661,6 +688,11 @@ func (h *Handler) handleUpdate(ctx context.Context, w common.ResponseWriter, url
 			return
 		}
 		logger.Info("Successfully updated record with nested data, rows: %d", result.AffectedRows)
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, result.Data, nil)
 		return
 	}
@@ -697,6 +729,11 @@ func (h *Handler) handleUpdate(ctx context.Context, w common.ResponseWriter, url
 		}
 
 		logger.Info("Successfully updated %d records", result.RowsAffected())
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, data, nil)
 
 	case []map[string]interface{}:
@@ -735,6 +772,11 @@ func (h *Handler) handleUpdate(ctx context.Context, w common.ResponseWriter, url
 			return
 		}
 		logger.Info("Successfully updated %d records with nested data", len(results))
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, results, nil)
 		return
 	}
@@ -758,6 +800,11 @@ func (h *Handler) handleUpdate(ctx context.Context, w common.ResponseWriter, url
 			return
 		}
 		logger.Info("Successfully updated %d records", len(updates))
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, updates, nil)
 
 	case []interface{}:
@@ -800,6 +847,11 @@ func (h *Handler) handleUpdate(ctx context.Context, w common.ResponseWriter, url
 			return
 		}
 		logger.Info("Successfully updated %d records with nested data", len(results))
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, results, nil)
 		return
 	}
@@ -827,6 +879,11 @@ func (h *Handler) handleUpdate(ctx context.Context, w common.ResponseWriter, url
 			return
 		}
 		logger.Info("Successfully updated %d records", len(list))
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, list, nil)
 
 	default:
@@ -873,6 +930,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
 			return
 		}
 		logger.Info("Successfully deleted %d records", len(v))
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, map[string]interface{}{"deleted": len(v)}, nil)
 		return
 
@@ -914,6 +976,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
 			return
 		}
 		logger.Info("Successfully deleted %d records", deletedCount)
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, map[string]interface{}{"deleted": deletedCount}, nil)
 		return
 
@@ -940,6 +1007,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
 			return
 		}
 		logger.Info("Successfully deleted %d records", deletedCount)
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, map[string]interface{}{"deleted": deletedCount}, nil)
 		return
 
@@ -998,6 +1070,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
 
 	logger.Info("Successfully deleted record with ID: %s", id)
 	// Return the deleted record data
+	// Invalidate cache for this table
+	cacheTags := buildCacheTags(schema, tableName)
+	if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+		logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+	}
 	h.sendResponse(w, recordToDelete, nil)
 }
 
(hunks in the restheadspec cache helpers)

@@ -1,4 +1,4 @@
-package cache
+package restheadspec
 
 import (
 	"context"
@@ -7,56 +7,42 @@ import (
 	"encoding/json"
 	"fmt"
 	"strings"
+	"time"
 
+	"github.com/bitechdev/ResolveSpec/pkg/cache"
 	"github.com/bitechdev/ResolveSpec/pkg/common"
 )
 
-// QueryCacheKey represents the components used to build a cache key for query total count
-type QueryCacheKey struct {
+// expandOptionKey represents expand options for cache key
+type expandOptionKey struct {
+	Relation string `json:"relation"`
+	Where    string `json:"where,omitempty"`
+}
+
+// queryCacheKey represents the components used to build a cache key for query total count
+type queryCacheKey struct {
 	TableName      string                `json:"table_name"`
 	Filters        []common.FilterOption `json:"filters"`
 	Sort           []common.SortOption   `json:"sort"`
 	CustomSQLWhere string                `json:"custom_sql_where,omitempty"`
 	CustomSQLOr    string                `json:"custom_sql_or,omitempty"`
-	Expand         []ExpandOptionKey     `json:"expand,omitempty"`
+	Expand         []expandOptionKey     `json:"expand,omitempty"`
 	Distinct       bool                  `json:"distinct,omitempty"`
 	CursorForward  string                `json:"cursor_forward,omitempty"`
 	CursorBackward string                `json:"cursor_backward,omitempty"`
 }
 
-// ExpandOptionKey represents expand options for cache key
-type ExpandOptionKey struct {
-	Relation string `json:"relation"`
-	Where    string `json:"where,omitempty"`
-}
-
-// BuildQueryCacheKey builds a cache key from query parameters for total count caching
-// This is used to cache the total count of records matching a query
-func BuildQueryCacheKey(tableName string, filters []common.FilterOption, sort []common.SortOption, customWhere, customOr string) string {
-	key := QueryCacheKey{
-		TableName:      tableName,
-		Filters:        filters,
-		Sort:           sort,
-		CustomSQLWhere: customWhere,
-		CustomSQLOr:    customOr,
-	}
-
-	// Serialize to JSON for consistent hashing
-	jsonData, err := json.Marshal(key)
-	if err != nil {
-		// Fallback to simple string concatenation if JSON fails
-		return hashString(fmt.Sprintf("%s_%v_%v_%s_%s", tableName, filters, sort, customWhere, customOr))
-	}
-
-	return hashString(string(jsonData))
-}
-
-// BuildExtendedQueryCacheKey builds a cache key for extended query options (restheadspec)
+// cachedTotal represents a cached total count
+type cachedTotal struct {
+	Total int `json:"total"`
+}
+
+// buildExtendedQueryCacheKey builds a cache key for extended query options (restheadspec)
 // Includes expand, distinct, and cursor pagination options
-func BuildExtendedQueryCacheKey(tableName string, filters []common.FilterOption, sort []common.SortOption,
+func buildExtendedQueryCacheKey(tableName string, filters []common.FilterOption, sort []common.SortOption,
 	customWhere, customOr string, expandOpts []interface{}, distinct bool, cursorFwd, cursorBwd string) string {
 
-	key := QueryCacheKey{
+	key := queryCacheKey{
 		TableName: tableName,
 		Filters:   filters,
 		Sort:      sort,
@@ -69,11 +55,11 @@ func BuildExtendedQueryCacheKey(tableName string, filters []common.FilterOption,
 
 	// Convert expand options to cache key format
 	if len(expandOpts) > 0 {
-		key.Expand = make([]ExpandOptionKey, 0, len(expandOpts))
+		key.Expand = make([]expandOptionKey, 0, len(expandOpts))
 		for _, exp := range expandOpts {
 			// Type assert to get the expand option fields we care about for caching
 			if expMap, ok := exp.(map[string]interface{}); ok {
-				expKey := ExpandOptionKey{}
+				expKey := expandOptionKey{}
 				if rel, ok := expMap["relation"].(string); ok {
 					expKey.Relation = rel
 				}
@@ -83,7 +69,6 @@ func BuildExtendedQueryCacheKey(tableName string, filters []common.FilterOption,
 				key.Expand = append(key.Expand, expKey)
 			}
 		}
-		// Sort expand options for consistent hashing (already sorted by relation name above)
 	}
 
 	// Serialize to JSON for consistent hashing
@@ -104,24 +89,38 @@ func hashString(s string) string {
 	return hex.EncodeToString(h.Sum(nil))
 }
 
-// GetQueryTotalCacheKey returns a formatted cache key for storing/retrieving total count
-func GetQueryTotalCacheKey(hash string) string {
+// getQueryTotalCacheKey returns a formatted cache key for storing/retrieving total count
+func getQueryTotalCacheKey(hash string) string {
 	return fmt.Sprintf("query_total:%s", hash)
 }
 
-// CachedTotal represents a cached total count
-type CachedTotal struct {
-	Total int `json:"total"`
-}
-
-// InvalidateCacheForTable removes all cached totals for a specific table
-// This should be called when data in the table changes (insert/update/delete)
-func InvalidateCacheForTable(ctx context.Context, tableName string) error {
-	cache := GetDefaultCache()
-
-	// Build a pattern to match all query totals for this table
-	// Note: This requires pattern matching support in the provider
-	pattern := fmt.Sprintf("query_total:*%s*", strings.ToLower(tableName))
-
-	return cache.DeleteByPattern(ctx, pattern)
+// buildCacheTags creates cache tags from schema and table name
+func buildCacheTags(schema, tableName string) []string {
+	return []string{
+		fmt.Sprintf("schema:%s", strings.ToLower(schema)),
+		fmt.Sprintf("table:%s", strings.ToLower(tableName)),
+	}
+}
+
+// setQueryTotalCache stores a query total in the cache with schema and table tags
+func setQueryTotalCache(ctx context.Context, cacheKey string, total int, schema, tableName string, ttl time.Duration) error {
+	c := cache.GetDefaultCache()
+	cacheData := cachedTotal{Total: total}
+	tags := buildCacheTags(schema, tableName)
+
+	return c.SetWithTags(ctx, cacheKey, cacheData, ttl, tags)
+}
+
+// invalidateCacheForTags removes all cached items matching the specified tags
+func invalidateCacheForTags(ctx context.Context, tags []string) error {
+	c := cache.GetDefaultCache()
+
+	// Invalidate for each tag
+	for _, tag := range tags {
+		if err := c.DeleteByTag(ctx, tag); err != nil {
+			return err
+		}
+	}
+
+	return nil
 }
(hunks in the restheadspec handler)

@@ -529,7 +529,7 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
 	var total int
 	if !options.SkipCount {
 		// Try to get from cache first (unless SkipCache is true)
-		var cachedTotal *cache.CachedTotal
+		var cachedTotalData *cachedTotal
 		var cacheKey string
 
 		if !options.SkipCache {
@@ -543,7 +543,7 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
 				}
 			}
 
-			cacheKeyHash := cache.BuildExtendedQueryCacheKey(
+			cacheKeyHash := buildExtendedQueryCacheKey(
 				tableName,
 				options.Filters,
 				options.Sort,
@@ -554,22 +554,22 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
 				options.CursorForward,
 				options.CursorBackward,
 			)
-			cacheKey = cache.GetQueryTotalCacheKey(cacheKeyHash)
+			cacheKey = getQueryTotalCacheKey(cacheKeyHash)
 
 			// Try to retrieve from cache
-			cachedTotal = &cache.CachedTotal{}
-			err := cache.GetDefaultCache().Get(ctx, cacheKey, cachedTotal)
+			cachedTotalData = &cachedTotal{}
+			err := cache.GetDefaultCache().Get(ctx, cacheKey, cachedTotalData)
 			if err == nil {
-				total = cachedTotal.Total
+				total = cachedTotalData.Total
 				logger.Debug("Total records (from cache): %d", total)
 			} else {
 				logger.Debug("Cache miss for query total")
-				cachedTotal = nil
+				cachedTotalData = nil
 			}
 		}
 
 		// If not in cache or cache skip, execute count query
-		if cachedTotal == nil {
+		if cachedTotalData == nil {
 			count, err := query.Count(ctx)
 			if err != nil {
 				logger.Error("Error counting records: %v", err)
@@ -579,11 +579,10 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
 			total = count
 			logger.Debug("Total records (from query): %d", total)
 
-			// Store in cache (if caching is enabled)
+			// Store in cache with schema and table tags (if caching is enabled)
 			if !options.SkipCache && cacheKey != "" {
 				cacheTTL := time.Minute * 2 // Default 2 minutes TTL
-				cacheData := &cache.CachedTotal{Total: total}
-				if err := cache.GetDefaultCache().Set(ctx, cacheKey, cacheData, cacheTTL); err != nil {
+				if err := setQueryTotalCache(ctx, cacheKey, total, schema, tableName, cacheTTL); err != nil {
 					logger.Warn("Failed to cache query total: %v", err)
 					// Don't fail the request if caching fails
 				} else {
@@ -1149,6 +1148,11 @@ func (h *Handler) handleCreate(ctx context.Context, w common.ResponseWriter, dat
 	}
 
 	logger.Info("Successfully created %d record(s)", len(mergedResults))
+	// Invalidate cache for this table
+	cacheTags := buildCacheTags(schema, tableName)
+	if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+		logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+	}
 	h.sendResponseWithOptions(w, responseData, nil, &options)
 }
 
@@ -1320,6 +1324,11 @@ func (h *Handler) handleUpdate(ctx context.Context, w common.ResponseWriter, id
 	}
 
 	logger.Info("Successfully updated record with ID: %v", targetID)
+	// Invalidate cache for this table
+	cacheTags := buildCacheTags(schema, tableName)
+	if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+		logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+	}
 	h.sendResponseWithOptions(w, mergedData, nil, &options)
 }
 
@@ -1388,6 +1397,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
 			return
 		}
 		logger.Info("Successfully deleted %d records", deletedCount)
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, map[string]interface{}{"deleted": deletedCount}, nil)
 		return
 
@@ -1456,6 +1470,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
 			return
 		}
 		logger.Info("Successfully deleted %d records", deletedCount)
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, map[string]interface{}{"deleted": deletedCount}, nil)
 		return
 
@@ -1510,6 +1529,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
 			return
 		}
 		logger.Info("Successfully deleted %d records", deletedCount)
+		// Invalidate cache for this table
+		cacheTags := buildCacheTags(schema, tableName)
+		if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+			logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+		}
 		h.sendResponse(w, map[string]interface{}{"deleted": deletedCount}, nil)
 		return
 
@@ -1611,6 +1635,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
 	}
 
 	// Return the deleted record data
+	// Invalidate cache for this table
+	cacheTags := buildCacheTags(schema, tableName)
+	if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
+		logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
+	}
 	h.sendResponse(w, recordToDelete, nil)
 }
 