Mirror of https://github.com/bitechdev/ResolveSpec.git (synced 2025-12-31 00:34:25 +00:00)
Added cache, funcspec and implemented total cache

pkg/cache/README.md (vendored, new file, +340 lines)

# Cache Package

A flexible, provider-based caching library for Go that supports multiple backend storage systems including in-memory, Redis, and Memcache.

## Features

- **Multiple Providers**: Support for in-memory, Redis, and Memcache backends
- **Pluggable Architecture**: Easy to add custom cache providers
- **Type-Safe API**: Automatic JSON serialization/deserialization
- **TTL Support**: Configurable time-to-live for cache entries
- **Context-Aware**: All operations support Go contexts
- **Statistics**: Built-in cache statistics and monitoring
- **Pattern Deletion**: Delete keys by pattern (Redis)
- **Lazy Loading**: GetOrSet pattern for easy cache-aside implementation

## Installation

```bash
go get github.com/bitechdev/ResolveSpec/pkg/cache
```

For Redis support:
```bash
go get github.com/redis/go-redis/v9
```

For Memcache support:
```bash
go get github.com/bradfitz/gomemcache/memcache
```

## Quick Start

### In-Memory Cache

```go
package main

import (
	"context"
	"time"

	"github.com/bitechdev/ResolveSpec/pkg/cache"
)

func main() {
	// Initialize with in-memory provider
	cache.UseMemory(&cache.Options{
		DefaultTTL: 5 * time.Minute,
		MaxSize:    10000,
	})
	defer cache.Close()

	ctx := context.Background()
	c := cache.GetDefaultCache()

	// Store a value
	type User struct {
		ID   int
		Name string
	}
	user := User{ID: 1, Name: "John"}
	c.Set(ctx, "user:1", user, 10*time.Minute)

	// Retrieve a value
	var retrieved User
	c.Get(ctx, "user:1", &retrieved)
}
```

### Redis Cache

```go
cache.UseRedis(&cache.RedisConfig{
	Host:     "localhost",
	Port:     6379,
	Password: "",
	DB:       0,
	Options: &cache.Options{
		DefaultTTL: 5 * time.Minute,
	},
})
defer cache.Close()
```

### Memcache

```go
cache.UseMemcache(&cache.MemcacheConfig{
	Servers: []string{"localhost:11211"},
	Options: &cache.Options{
		DefaultTTL: 5 * time.Minute,
	},
})
defer cache.Close()
```

## API Reference

### Core Methods

#### Set
```go
Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error
```
Stores a value in the cache with automatic JSON serialization.

#### Get
```go
Get(ctx context.Context, key string, dest interface{}) error
```
Retrieves and deserializes a value from the cache.

#### SetBytes / GetBytes
```go
SetBytes(ctx context.Context, key string, value []byte, ttl time.Duration) error
GetBytes(ctx context.Context, key string) ([]byte, error)
```
Store and retrieve raw bytes without serialization.

#### Delete
```go
Delete(ctx context.Context, key string) error
```
Removes a key from the cache.

#### DeleteByPattern
```go
DeleteByPattern(ctx context.Context, pattern string) error
```
Removes all keys matching a pattern (Redis only).

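For example, a minimal sketch (the `user:*:profile` key layout is an assumed naming convention, and the glob syntax is Redis `SCAN MATCH` syntax):

```go
// Remove every cached user profile, leaving other user:* keys intact
err := c.DeleteByPattern(ctx, "user:*:profile")
```
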
#### Clear
```go
Clear(ctx context.Context) error
```
Removes all items from the cache.

#### Exists
```go
Exists(ctx context.Context, key string) bool
```
Checks if a key exists in the cache.

#### GetOrSet
```go
GetOrSet(ctx context.Context, key string, dest interface{}, ttl time.Duration,
	loader func() (interface{}, error)) error
```
Retrieves a value from cache, or loads and caches it if not found (lazy loading).

#### Stats
```go
Stats(ctx context.Context) (*CacheStats, error)
```
Returns cache statistics including hits, misses, and key counts.

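For example, a minimal sketch that logs the hit ratio (note that in this commit only the in-memory provider populates `Hits` and `Misses`):

```go
stats, err := cache.GetStats(ctx)
if err == nil && stats.Hits+stats.Misses > 0 {
	ratio := float64(stats.Hits) / float64(stats.Hits+stats.Misses)
	log.Printf("%s cache: %d keys, %.0f%% hit ratio", stats.ProviderType, stats.Keys, ratio*100)
}
```
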
## Provider Configuration

### In-Memory Options

```go
&cache.Options{
	DefaultTTL:     5 * time.Minute, // Default expiration time
	MaxSize:        10000,           // Maximum number of items
	EvictionPolicy: "LRU",           // Eviction strategy (future)
}
```

### Redis Configuration

```go
&cache.RedisConfig{
	Host:     "localhost",
	Port:     6379,
	Password: "", // Optional authentication
	DB:       0,  // Database number
	PoolSize: 10, // Connection pool size
	Options: &cache.Options{
		DefaultTTL: 5 * time.Minute,
	},
}
```

### Memcache Configuration

```go
&cache.MemcacheConfig{
	Servers:      []string{"localhost:11211"},
	MaxIdleConns: 2,
	Timeout:      1 * time.Second,
	Options: &cache.Options{
		DefaultTTL: 5 * time.Minute,
	},
}
```

## Advanced Usage

### Custom Provider

```go
// Create a custom provider instance
memProvider := cache.NewMemoryProvider(&cache.Options{
	DefaultTTL: 10 * time.Minute,
	MaxSize:    500,
})

// Initialize with custom provider
cache.Initialize(memProvider)
```

### Lazy Loading Pattern

```go
var data ExpensiveData
err := c.GetOrSet(ctx, "expensive:key", &data, 10*time.Minute, func() (interface{}, error) {
	// This expensive operation only runs if key is not in cache
	return computeExpensiveData(), nil
})
```

### Query API Cache

The package includes specialized functions for caching query results:

```go
// Cache a query result
api := "GetUsers"
query := "SELECT * FROM users WHERE active = true"
tablenames := "users"
total := int64(150)

cache.PutQueryAPICache(ctx, api, query, tablenames, total)

// Retrieve cached query
hash := cache.HashQueryAPICache(api, query)
cachedQuery, err := cache.FetchQueryAPICache(ctx, hash)
```

## Provider Comparison

| Feature | In-Memory | Redis | Memcache |
|---------|-----------|-------|----------|
| Persistence | No | Yes | No |
| Distributed | No | Yes | Yes |
| Pattern Delete | No | Yes | No |
| Statistics | Full | Full | Limited |
| Atomic Operations | Yes | Yes | Yes |
| Max Item Size | Memory | 512MB | 1MB |

## Best Practices

1. **Use contexts**: Always pass a context for cancellation and timeout control
2. **Set appropriate TTLs**: Balance freshness against performance
3. **Handle errors**: Cache misses and errors should be handled gracefully
4. **Monitor statistics**: Use Stats() to monitor cache performance
5. **Clean up**: Always call Close() when shutting down
6. **Pattern consistency**: Use consistent key naming patterns (e.g., "user:id:field"); see the key-builder sketch after this list

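A small helper keeps key names consistent across call sites. The `CacheKey` function below is a hypothetical convention sketch, not part of the package (it assumes `strings` and `strconv` are imported):

```go
// CacheKey joins parts into a "user:42:profile" style key.
func CacheKey(parts ...string) string {
	return strings.Join(parts, ":")
}

// profileKey builds the cache key for one user's profile.
func profileKey(userID int) string {
	return CacheKey("user", strconv.Itoa(userID), "profile")
}
```
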
## Example: Complete Application

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/bitechdev/ResolveSpec/pkg/cache"
)

type User struct {
	ID   int
	Name string
}

type UserService struct {
	cache *cache.Cache
}

func NewUserService() *UserService {
	// Initialize with Redis in production, memory for testing
	cache.UseRedis(&cache.RedisConfig{
		Host: "localhost",
		Port: 6379,
		Options: &cache.Options{
			DefaultTTL: 10 * time.Minute,
		},
	})

	return &UserService{
		cache: cache.GetDefaultCache(),
	}
}

// loadUserFromDB is a placeholder for the real database lookup.
func (s *UserService) loadUserFromDB(userID int) (*User, error) {
	return &User{ID: userID, Name: "John"}, nil
}

func (s *UserService) GetUser(ctx context.Context, userID int) (*User, error) {
	var user User
	cacheKey := fmt.Sprintf("user:%d", userID)

	// Try to get from cache first
	err := s.cache.GetOrSet(ctx, cacheKey, &user, 15*time.Minute, func() (interface{}, error) {
		// Load from database if not in cache
		return s.loadUserFromDB(userID)
	})

	if err != nil {
		return nil, err
	}

	return &user, nil
}

func (s *UserService) InvalidateUser(ctx context.Context, userID int) error {
	cacheKey := fmt.Sprintf("user:%d", userID)
	return s.cache.Delete(ctx, cacheKey)
}

func main() {
	service := NewUserService()
	defer cache.Close()

	ctx := context.Background()
	user, err := service.GetUser(ctx, 123)
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("User: %+v", user)
}
```

## Performance Considerations

- **In-Memory**: Fastest, but limited by available RAM and not distributed
- **Redis**: Great for distributed systems and persistence, but adds network overhead
- **Memcache**: Good for simple distributed caching; easier to operate than Redis but with fewer features

Choose based on your needs (a selection sketch follows this list):
- Single instance? Use in-memory
- Need persistence or advanced features? Use Redis
- Simple distributed cache? Use Memcache

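One way to wire this up is to pick the provider from an environment variable at startup. This is a minimal sketch: the `CACHE_BACKEND` variable name is an assumption, and it expects `os`, `time`, and this cache package to be imported:

```go
func initCacheFromEnv() error {
	var err error
	switch os.Getenv("CACHE_BACKEND") { // hypothetical setting name
	case "redis":
		err = cache.UseRedis(&cache.RedisConfig{Host: "localhost", Port: 6379})
	case "memcache":
		err = cache.UseMemcache(&cache.MemcacheConfig{Servers: []string{"localhost:11211"}})
	default:
		err = cache.UseMemory(&cache.Options{DefaultTTL: 5 * time.Minute})
	}
	return err
}
```
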
## License

See repository license.

pkg/cache/cache.go (vendored, new file, +76 lines)

package cache

import (
	"context"
	"fmt"
	"time"
)

var (
	defaultCache *Cache
)

// Initialize initializes the cache with a provider.
// If not called, the package will use an in-memory provider by default.
func Initialize(provider Provider) {
	defaultCache = NewCache(provider)
}

// UseMemory configures the cache to use in-memory storage.
func UseMemory(opts *Options) error {
	provider := NewMemoryProvider(opts)
	defaultCache = NewCache(provider)
	return nil
}

// UseRedis configures the cache to use Redis storage.
func UseRedis(config *RedisConfig) error {
	provider, err := NewRedisProvider(config)
	if err != nil {
		return fmt.Errorf("failed to initialize Redis provider: %w", err)
	}
	defaultCache = NewCache(provider)
	return nil
}

// UseMemcache configures the cache to use Memcache storage.
func UseMemcache(config *MemcacheConfig) error {
	provider, err := NewMemcacheProvider(config)
	if err != nil {
		return fmt.Errorf("failed to initialize Memcache provider: %w", err)
	}
	defaultCache = NewCache(provider)
	return nil
}

// GetDefaultCache returns the default cache instance.
// Initializes with the in-memory provider if not already initialized.
func GetDefaultCache() *Cache {
	if defaultCache == nil {
		UseMemory(&Options{
			DefaultTTL: 5 * time.Minute,
			MaxSize:    10000,
		})
	}
	return defaultCache
}

// SetDefaultCache sets a custom cache instance as the default cache.
// This is useful for testing or when you want to use a pre-configured cache instance.
func SetDefaultCache(cache *Cache) {
	defaultCache = cache
}

// GetStats returns cache statistics.
func GetStats(ctx context.Context) (*CacheStats, error) {
	cache := GetDefaultCache()
	return cache.Stats(ctx)
}

// Close closes the cache and releases resources.
func Close() error {
	if defaultCache != nil {
		return defaultCache.Close()
	}
	return nil
}
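
A typical wiring of these package-level helpers at application startup might look like the following. This is a sketch under stated assumptions: falling back to the in-memory provider when Redis is unreachable is an illustrative choice, not something the package mandates.

```go
package main

import (
	"log"
	"time"

	"github.com/bitechdev/ResolveSpec/pkg/cache"
)

func main() {
	// Prefer Redis; fall back to the in-memory provider if it is unreachable.
	if err := cache.UseRedis(&cache.RedisConfig{Host: "localhost", Port: 6379}); err != nil {
		log.Printf("redis unavailable, using in-memory cache: %v", err)
		if err := cache.UseMemory(&cache.Options{DefaultTTL: 5 * time.Minute, MaxSize: 10000}); err != nil {
			log.Fatal(err)
		}
	}
	defer cache.Close()
}
```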

pkg/cache/cache_manager.go (vendored, new file, +147 lines)

package cache

import (
	"context"
	"encoding/json"
	"fmt"
	"time"
)

// Cache is the main cache manager that wraps a Provider.
type Cache struct {
	provider Provider
}

// NewCache creates a new cache manager with the specified provider.
func NewCache(provider Provider) *Cache {
	return &Cache{
		provider: provider,
	}
}

// Get retrieves and deserializes a value from the cache.
func (c *Cache) Get(ctx context.Context, key string, dest interface{}) error {
	data, exists := c.provider.Get(ctx, key)
	if !exists {
		return fmt.Errorf("key not found: %s", key)
	}

	if err := json.Unmarshal(data, dest); err != nil {
		return fmt.Errorf("failed to deserialize: %w", err)
	}

	return nil
}

// GetBytes retrieves raw bytes from the cache.
func (c *Cache) GetBytes(ctx context.Context, key string) ([]byte, error) {
	data, exists := c.provider.Get(ctx, key)
	if !exists {
		return nil, fmt.Errorf("key not found: %s", key)
	}
	return data, nil
}

// Set serializes and stores a value in the cache with the specified TTL.
func (c *Cache) Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error {
	data, err := json.Marshal(value)
	if err != nil {
		return fmt.Errorf("failed to serialize: %w", err)
	}

	return c.provider.Set(ctx, key, data, ttl)
}

// SetBytes stores raw bytes in the cache with the specified TTL.
func (c *Cache) SetBytes(ctx context.Context, key string, value []byte, ttl time.Duration) error {
	return c.provider.Set(ctx, key, value, ttl)
}

// Delete removes a key from the cache.
func (c *Cache) Delete(ctx context.Context, key string) error {
	return c.provider.Delete(ctx, key)
}

// DeleteByPattern removes all keys matching the pattern.
func (c *Cache) DeleteByPattern(ctx context.Context, pattern string) error {
	return c.provider.DeleteByPattern(ctx, pattern)
}

// Clear removes all items from the cache.
func (c *Cache) Clear(ctx context.Context) error {
	return c.provider.Clear(ctx)
}

// Exists checks if a key exists in the cache.
func (c *Cache) Exists(ctx context.Context, key string) bool {
	return c.provider.Exists(ctx, key)
}

// Stats returns statistics about the cache.
func (c *Cache) Stats(ctx context.Context) (*CacheStats, error) {
	return c.provider.Stats(ctx)
}

// Close closes the cache and releases any resources.
func (c *Cache) Close() error {
	return c.provider.Close()
}

// GetOrSet retrieves a value from cache, or sets it if it doesn't exist.
// The loader function is called only if the key is not found in cache.
func (c *Cache) GetOrSet(ctx context.Context, key string, dest interface{}, ttl time.Duration, loader func() (interface{}, error)) error {
	// Try to get from cache first
	err := c.Get(ctx, key, dest)
	if err == nil {
		return nil
	}

	// Load the value
	value, err := loader()
	if err != nil {
		return fmt.Errorf("loader failed: %w", err)
	}

	// Store in cache
	if err := c.Set(ctx, key, value, ttl); err != nil {
		return fmt.Errorf("failed to cache value: %w", err)
	}

	// Populate dest with the loaded value
	data, err := json.Marshal(value)
	if err != nil {
		return fmt.Errorf("failed to serialize loaded value: %w", err)
	}

	if err := json.Unmarshal(data, dest); err != nil {
		return fmt.Errorf("failed to deserialize loaded value: %w", err)
	}

	return nil
}

// Remember is a convenience function that caches the result of a function call.
// It's similar to GetOrSet but returns the value directly.
func (c *Cache) Remember(ctx context.Context, key string, ttl time.Duration, loader func() (interface{}, error)) (interface{}, error) {
	// Try to get from cache first as bytes
	data, err := c.GetBytes(ctx, key)
	if err == nil {
		var result interface{}
		if err := json.Unmarshal(data, &result); err == nil {
			return result, nil
		}
	}

	// Load the value
	value, err := loader()
	if err != nil {
		return nil, fmt.Errorf("loader failed: %w", err)
	}

	// Store in cache
	if err := c.Set(ctx, key, value, ttl); err != nil {
		return nil, fmt.Errorf("failed to cache value: %w", err)
	}

	return value, nil
}
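
A short usage sketch for Remember, under the caveat that cached values round-trip through JSON into an interface{}: a string comes back unchanged on a cache hit, while a struct comes back as a map[string]interface{}, so GetOrSet is usually preferable for typed values. The buildDailyReport helper below is hypothetical.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/bitechdev/ResolveSpec/pkg/cache"
)

// buildDailyReport stands in for an expensive computation.
func buildDailyReport() string { return "report body" }

func main() {
	ctx := context.Background()
	c := cache.GetDefaultCache()

	v, err := c.Remember(ctx, "report:daily", time.Hour, func() (interface{}, error) {
		return buildDailyReport(), nil
	})
	if err != nil {
		fmt.Println("remember failed:", err)
		return
	}

	// Safe for string values on both the hit and miss paths.
	report, _ := v.(string)
	fmt.Println(report)
}
```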

pkg/cache/cache_test.go (vendored, new file, +69 lines)

package cache

import (
	"context"
	"testing"
	"time"
)

func TestSetDefaultCache(t *testing.T) {
	// Create a custom cache instance
	provider := NewMemoryProvider(&Options{
		DefaultTTL: 1 * time.Minute,
		MaxSize:    50,
	})
	customCache := NewCache(provider)

	// Set it as the default
	SetDefaultCache(customCache)

	// Verify it's now the default
	retrievedCache := GetDefaultCache()
	if retrievedCache != customCache {
		t.Error("SetDefaultCache did not set the cache correctly")
	}

	// Test that we can use it
	ctx := context.Background()
	testKey := "test_key"
	testValue := "test_value"

	err := retrievedCache.Set(ctx, testKey, testValue, time.Minute)
	if err != nil {
		t.Fatalf("Failed to set value: %v", err)
	}

	var result string
	err = retrievedCache.Get(ctx, testKey, &result)
	if err != nil {
		t.Fatalf("Failed to get value: %v", err)
	}

	if result != testValue {
		t.Errorf("Expected %s, got %s", testValue, result)
	}

	// Clean up - reset to default
	SetDefaultCache(nil)
}

func TestGetDefaultCacheInitialization(t *testing.T) {
	// Reset to nil first
	SetDefaultCache(nil)

	// GetDefaultCache should auto-initialize
	cache := GetDefaultCache()
	if cache == nil {
		t.Error("GetDefaultCache should auto-initialize, got nil")
	}

	// Should be usable
	ctx := context.Background()
	err := cache.Set(ctx, "test", "value", time.Minute)
	if err != nil {
		t.Errorf("Failed to use auto-initialized cache: %v", err)
	}

	// Clean up
	SetDefaultCache(nil)
}

pkg/cache/example_usage.go (vendored, new file, +252 lines)

package cache

import (
	"context"
	"fmt"
	"log"
	"time"
)

// ExampleInMemoryCache demonstrates using the in-memory cache provider.
func ExampleInMemoryCache() {
	// Initialize with in-memory provider
	err := UseMemory(&Options{
		DefaultTTL: 5 * time.Minute,
		MaxSize:    1000,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer Close()

	ctx := context.Background()

	// Get the cache instance
	cache := GetDefaultCache()

	// Store a value
	type User struct {
		ID   int
		Name string
	}

	user := User{ID: 1, Name: "John Doe"}
	err = cache.Set(ctx, "user:1", user, 10*time.Minute)
	if err != nil {
		log.Fatal(err)
	}

	// Retrieve a value
	var retrieved User
	err = cache.Get(ctx, "user:1", &retrieved)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("Retrieved user: %+v\n", retrieved)

	// Check if key exists
	exists := cache.Exists(ctx, "user:1")
	fmt.Printf("Key exists: %v\n", exists)

	// Delete a key
	err = cache.Delete(ctx, "user:1")
	if err != nil {
		log.Fatal(err)
	}

	// Get statistics
	stats, err := cache.Stats(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Cache stats: %+v\n", stats)
}

// ExampleRedisCache demonstrates using the Redis cache provider.
func ExampleRedisCache() {
	// Initialize with Redis provider
	err := UseRedis(&RedisConfig{
		Host:     "localhost",
		Port:     6379,
		Password: "", // Set if Redis requires authentication
		DB:       0,
		Options: &Options{
			DefaultTTL: 5 * time.Minute,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer Close()

	ctx := context.Background()

	// Get the cache instance
	cache := GetDefaultCache()

	// Store raw bytes
	data := []byte("Hello, Redis!")
	err = cache.SetBytes(ctx, "greeting", data, 1*time.Hour)
	if err != nil {
		log.Fatal(err)
	}

	// Retrieve raw bytes
	retrieved, err := cache.GetBytes(ctx, "greeting")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("Retrieved data: %s\n", string(retrieved))

	// Clear all cache
	err = cache.Clear(ctx)
	if err != nil {
		log.Fatal(err)
	}
}

// ExampleMemcacheCache demonstrates using the Memcache cache provider.
func ExampleMemcacheCache() {
	// Initialize with Memcache provider
	err := UseMemcache(&MemcacheConfig{
		Servers: []string{"localhost:11211"},
		Options: &Options{
			DefaultTTL: 5 * time.Minute,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer Close()

	ctx := context.Background()

	// Get the cache instance
	cache := GetDefaultCache()

	// Store a value
	type Product struct {
		ID    int
		Name  string
		Price float64
	}

	product := Product{ID: 100, Name: "Widget", Price: 29.99}
	err = cache.Set(ctx, "product:100", product, 30*time.Minute)
	if err != nil {
		log.Fatal(err)
	}

	// Retrieve a value
	var retrieved Product
	err = cache.Get(ctx, "product:100", &retrieved)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("Retrieved product: %+v\n", retrieved)
}

// ExampleGetOrSet demonstrates the GetOrSet pattern for lazy loading.
func ExampleGetOrSet() {
	err := UseMemory(&Options{
		DefaultTTL: 5 * time.Minute,
		MaxSize:    1000,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer Close()

	ctx := context.Background()
	cache := GetDefaultCache()

	type ExpensiveData struct {
		Result string
	}

	var data ExpensiveData
	err = cache.GetOrSet(ctx, "expensive:computation", &data, 10*time.Minute, func() (interface{}, error) {
		// This expensive operation only runs if the key is not in cache
		fmt.Println("Computing expensive result...")
		time.Sleep(1 * time.Second)
		return ExpensiveData{Result: "computed value"}, nil
	})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("Data: %+v\n", data)

	// Second call will use cached value
	err = cache.GetOrSet(ctx, "expensive:computation", &data, 10*time.Minute, func() (interface{}, error) {
		fmt.Println("This won't be called!")
		return ExpensiveData{Result: "new value"}, nil
	})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("Cached data: %+v\n", data)
}

// ExampleCustomProvider demonstrates using a custom provider.
func ExampleCustomProvider() {
	// Create a custom provider
	memProvider := NewMemoryProvider(&Options{
		DefaultTTL: 10 * time.Minute,
		MaxSize:    500,
	})

	// Initialize with custom provider
	Initialize(memProvider)
	defer Close()

	ctx := context.Background()
	cache := GetDefaultCache()

	// Use the cache
	err := cache.SetBytes(ctx, "key", []byte("value"), 5*time.Minute)
	if err != nil {
		log.Fatal(err)
	}

	// Clean expired items (memory provider specific)
	if mp, ok := cache.provider.(*MemoryProvider); ok {
		count := mp.CleanExpired(ctx)
		fmt.Printf("Cleaned %d expired items\n", count)
	}
}

// ExampleDeleteByPattern demonstrates pattern-based deletion (Redis only).
func ExampleDeleteByPattern() {
	err := UseRedis(&RedisConfig{
		Host: "localhost",
		Port: 6379,
		Options: &Options{
			DefaultTTL: 5 * time.Minute,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer Close()

	ctx := context.Background()
	cache := GetDefaultCache()

	// Store multiple keys with a pattern
	cache.SetBytes(ctx, "user:1:profile", []byte("profile1"), 10*time.Minute)
	cache.SetBytes(ctx, "user:2:profile", []byte("profile2"), 10*time.Minute)
	cache.SetBytes(ctx, "user:1:settings", []byte("settings1"), 10*time.Minute)

	// Delete all keys matching pattern (Redis glob pattern)
	err = cache.DeleteByPattern(ctx, "user:*:profile")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("Deleted all user profile keys")
}

pkg/cache/provider.go (vendored, new file, +57 lines)

package cache

import (
	"context"
	"time"
)

// Provider defines the interface that all cache providers must implement.
type Provider interface {
	// Get retrieves a value from the cache by key.
	// Returns nil, false if the key doesn't exist or is expired.
	Get(ctx context.Context, key string) ([]byte, bool)

	// Set stores a value in the cache with the specified TTL.
	// If ttl is 0, the provider's DefaultTTL is applied (and the item never
	// expires if that is also zero).
	Set(ctx context.Context, key string, value []byte, ttl time.Duration) error

	// Delete removes a key from the cache.
	Delete(ctx context.Context, key string) error

	// DeleteByPattern removes all keys matching the pattern.
	// Pattern syntax depends on the provider implementation.
	DeleteByPattern(ctx context.Context, pattern string) error

	// Clear removes all items from the cache.
	Clear(ctx context.Context) error

	// Exists checks if a key exists in the cache.
	Exists(ctx context.Context, key string) bool

	// Close closes the provider and releases any resources.
	Close() error

	// Stats returns statistics about the cache provider.
	Stats(ctx context.Context) (*CacheStats, error)
}

// CacheStats contains cache statistics.
type CacheStats struct {
	Hits          int64          `json:"hits"`
	Misses        int64          `json:"misses"`
	Keys          int64          `json:"keys"`
	ProviderType  string         `json:"provider_type"`
	ProviderStats map[string]any `json:"provider_stats,omitempty"`
}

// Options contains configuration options for cache providers.
type Options struct {
	// DefaultTTL is the default time-to-live for cache items.
	DefaultTTL time.Duration

	// MaxSize is the maximum number of items (for the in-memory provider).
	MaxSize int

	// EvictionPolicy determines how items are evicted (LRU, LFU, etc).
	EvictionPolicy string
}
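
Because Provider is a plain interface, custom implementations can be slotted in through Initialize. Below is a minimal sketch, not part of the package, of a provider that namespaces keys by delegating every call to an inner Provider; the PrefixProvider name and the cacheext package are assumptions for illustration.

```go
package cacheext

import (
	"context"
	"time"

	"github.com/bitechdev/ResolveSpec/pkg/cache"
)

// PrefixProvider namespaces keys by delegating to an inner cache.Provider.
type PrefixProvider struct {
	inner  cache.Provider
	prefix string
}

func NewPrefixProvider(inner cache.Provider, prefix string) *PrefixProvider {
	return &PrefixProvider{inner: inner, prefix: prefix}
}

func (p *PrefixProvider) key(k string) string { return p.prefix + ":" + k }

func (p *PrefixProvider) Get(ctx context.Context, key string) ([]byte, bool) {
	return p.inner.Get(ctx, p.key(key))
}

func (p *PrefixProvider) Set(ctx context.Context, key string, value []byte, ttl time.Duration) error {
	return p.inner.Set(ctx, p.key(key), value, ttl)
}

func (p *PrefixProvider) Delete(ctx context.Context, key string) error {
	return p.inner.Delete(ctx, p.key(key))
}

func (p *PrefixProvider) DeleteByPattern(ctx context.Context, pattern string) error {
	// Pattern syntax stays whatever the inner provider expects; only the prefix is added.
	return p.inner.DeleteByPattern(ctx, p.prefix+":"+pattern)
}

func (p *PrefixProvider) Clear(ctx context.Context) error { return p.inner.Clear(ctx) }

func (p *PrefixProvider) Exists(ctx context.Context, key string) bool {
	return p.inner.Exists(ctx, p.key(key))
}

func (p *PrefixProvider) Close() error { return p.inner.Close() }

func (p *PrefixProvider) Stats(ctx context.Context) (*cache.CacheStats, error) {
	return p.inner.Stats(ctx)
}
```

It could then be installed with `cache.Initialize(cacheext.NewPrefixProvider(cache.NewMemoryProvider(nil), "svc"))`.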

pkg/cache/provider_memcache.go (vendored, new file, +144 lines)

package cache

import (
	"context"
	"fmt"
	"time"

	"github.com/bradfitz/gomemcache/memcache"
)

// MemcacheProvider is a Memcache implementation of the Provider interface.
type MemcacheProvider struct {
	client  *memcache.Client
	options *Options
}

// MemcacheConfig contains Memcache-specific configuration.
type MemcacheConfig struct {
	// Servers is a list of memcache server addresses (e.g., "localhost:11211")
	Servers []string

	// MaxIdleConns is the maximum number of idle connections (default: 2)
	MaxIdleConns int

	// Timeout for connection operations (default: 1 second)
	Timeout time.Duration

	// Options contains general cache options
	Options *Options
}

// NewMemcacheProvider creates a new Memcache cache provider.
func NewMemcacheProvider(config *MemcacheConfig) (*MemcacheProvider, error) {
	if config == nil {
		config = &MemcacheConfig{
			Servers: []string{"localhost:11211"},
		}
	}

	if len(config.Servers) == 0 {
		config.Servers = []string{"localhost:11211"}
	}

	if config.MaxIdleConns == 0 {
		config.MaxIdleConns = 2
	}

	if config.Timeout == 0 {
		config.Timeout = 1 * time.Second
	}

	if config.Options == nil {
		config.Options = &Options{
			DefaultTTL: 5 * time.Minute,
		}
	}

	client := memcache.New(config.Servers...)
	client.MaxIdleConns = config.MaxIdleConns
	client.Timeout = config.Timeout

	// Test connection
	if err := client.Ping(); err != nil {
		return nil, fmt.Errorf("failed to connect to Memcache: %w", err)
	}

	return &MemcacheProvider{
		client:  client,
		options: config.Options,
	}, nil
}

// Get retrieves a value from the cache by key.
func (m *MemcacheProvider) Get(ctx context.Context, key string) ([]byte, bool) {
	item, err := m.client.Get(key)
	if err == memcache.ErrCacheMiss {
		return nil, false
	}
	if err != nil {
		return nil, false
	}
	return item.Value, true
}

// Set stores a value in the cache with the specified TTL.
func (m *MemcacheProvider) Set(ctx context.Context, key string, value []byte, ttl time.Duration) error {
	if ttl == 0 {
		ttl = m.options.DefaultTTL
	}

	item := &memcache.Item{
		Key:        key,
		Value:      value,
		Expiration: int32(ttl.Seconds()),
	}

	return m.client.Set(item)
}

// Delete removes a key from the cache.
func (m *MemcacheProvider) Delete(ctx context.Context, key string) error {
	err := m.client.Delete(key)
	if err == memcache.ErrCacheMiss {
		return nil
	}
	return err
}

// DeleteByPattern removes all keys matching the pattern.
// Note: Memcache does not support pattern-based deletion natively,
// so this method deletes nothing and always returns an error.
func (m *MemcacheProvider) DeleteByPattern(ctx context.Context, pattern string) error {
	return fmt.Errorf("pattern-based deletion is not supported by Memcache")
}

// Clear removes all items from the cache.
func (m *MemcacheProvider) Clear(ctx context.Context) error {
	return m.client.FlushAll()
}

// Exists checks if a key exists in the cache.
func (m *MemcacheProvider) Exists(ctx context.Context, key string) bool {
	_, err := m.client.Get(key)
	return err == nil
}

// Close closes the provider and releases any resources.
func (m *MemcacheProvider) Close() error {
	// The memcache client doesn't have a close method
	return nil
}

// Stats returns statistics about the cache provider.
// Note: The Memcache provider returns limited statistics.
func (m *MemcacheProvider) Stats(ctx context.Context) (*CacheStats, error) {
	stats := &CacheStats{
		ProviderType: "memcache",
		ProviderStats: map[string]any{
			"note": "Memcache does not provide detailed statistics through the standard client",
		},
	}

	return stats, nil
}

pkg/cache/provider_memory.go (vendored, new file, +226 lines)

package cache

import (
	"context"
	"fmt"
	"regexp"
	"sync"
	"time"
)

// memoryItem represents a cached item in memory.
type memoryItem struct {
	Value      []byte
	Expiration time.Time
	LastAccess time.Time
	HitCount   int64
}

// isExpired checks if the item has expired.
func (m *memoryItem) isExpired() bool {
	if m.Expiration.IsZero() {
		return false
	}
	return time.Now().After(m.Expiration)
}

// MemoryProvider is an in-memory implementation of the Provider interface.
type MemoryProvider struct {
	mu      sync.RWMutex
	items   map[string]*memoryItem
	options *Options
	hits    int64
	misses  int64
}

// NewMemoryProvider creates a new in-memory cache provider.
func NewMemoryProvider(opts *Options) *MemoryProvider {
	if opts == nil {
		opts = &Options{
			DefaultTTL: 5 * time.Minute,
			MaxSize:    10000,
		}
	}

	return &MemoryProvider{
		items:   make(map[string]*memoryItem),
		options: opts,
	}
}

// Get retrieves a value from the cache by key.
func (m *MemoryProvider) Get(ctx context.Context, key string) ([]byte, bool) {
	m.mu.Lock()
	defer m.mu.Unlock()

	item, exists := m.items[key]
	if !exists {
		m.misses++
		return nil, false
	}

	if item.isExpired() {
		delete(m.items, key)
		m.misses++
		return nil, false
	}

	item.LastAccess = time.Now()
	item.HitCount++
	m.hits++

	return item.Value, true
}

// Set stores a value in the cache with the specified TTL.
func (m *MemoryProvider) Set(ctx context.Context, key string, value []byte, ttl time.Duration) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	if ttl == 0 {
		ttl = m.options.DefaultTTL
	}

	var expiration time.Time
	if ttl > 0 {
		expiration = time.Now().Add(ttl)
	}

	// Check max size and evict if necessary
	if m.options.MaxSize > 0 && len(m.items) >= m.options.MaxSize {
		if _, exists := m.items[key]; !exists {
			m.evictOne()
		}
	}

	m.items[key] = &memoryItem{
		Value:      value,
		Expiration: expiration,
		LastAccess: time.Now(),
	}

	return nil
}

// Delete removes a key from the cache.
func (m *MemoryProvider) Delete(ctx context.Context, key string) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	delete(m.items, key)
	return nil
}

// DeleteByPattern removes all keys matching the pattern.
// For this provider the pattern is interpreted as a Go regular expression.
func (m *MemoryProvider) DeleteByPattern(ctx context.Context, pattern string) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	re, err := regexp.Compile(pattern)
	if err != nil {
		return fmt.Errorf("invalid pattern: %w", err)
	}

	for key := range m.items {
		if re.MatchString(key) {
			delete(m.items, key)
		}
	}

	return nil
}

// Clear removes all items from the cache.
func (m *MemoryProvider) Clear(ctx context.Context) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	m.items = make(map[string]*memoryItem)
	m.hits = 0
	m.misses = 0
	return nil
}

// Exists checks if a key exists in the cache.
func (m *MemoryProvider) Exists(ctx context.Context, key string) bool {
	m.mu.RLock()
	defer m.mu.RUnlock()

	item, exists := m.items[key]
	if !exists {
		return false
	}

	return !item.isExpired()
}

// Close closes the provider and releases any resources.
func (m *MemoryProvider) Close() error {
	m.mu.Lock()
	defer m.mu.Unlock()

	m.items = nil
	return nil
}

// Stats returns statistics about the cache provider.
func (m *MemoryProvider) Stats(ctx context.Context) (*CacheStats, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()

	// Count only items that have not expired yet
	validKeys := 0
	for _, item := range m.items {
		if !item.isExpired() {
			validKeys++
		}
	}

	return &CacheStats{
		Hits:         m.hits,
		Misses:       m.misses,
		Keys:         int64(validKeys),
		ProviderType: "memory",
		ProviderStats: map[string]any{
			"capacity": m.options.MaxSize,
		},
	}, nil
}

// evictOne removes one item from the cache using an LRU strategy.
func (m *MemoryProvider) evictOne() {
	var oldestKey string
	var oldestTime time.Time

	for key, item := range m.items {
		if item.isExpired() {
			delete(m.items, key)
			return
		}

		if oldestKey == "" || item.LastAccess.Before(oldestTime) {
			oldestKey = key
			oldestTime = item.LastAccess
		}
	}

	if oldestKey != "" {
		delete(m.items, oldestKey)
	}
}

// CleanExpired removes all expired items from the cache.
func (m *MemoryProvider) CleanExpired(ctx context.Context) int {
	m.mu.Lock()
	defer m.mu.Unlock()

	count := 0
	for key, item := range m.items {
		if item.isExpired() {
			delete(m.items, key)
			count++
		}
	}

	return count
}
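
The eviction path above can be exercised with a small test-style sketch (not part of the commit): with MaxSize set to 2, adding a third key evicts the least recently accessed entry, assuming none have expired.

```go
package cache

import (
	"context"
	"testing"
	"time"
)

// TestEvictOneLRU is an illustrative sketch of the MaxSize/LRU behaviour.
func TestEvictOneLRU(t *testing.T) {
	p := NewMemoryProvider(&Options{DefaultTTL: time.Minute, MaxSize: 2})
	ctx := context.Background()

	_ = p.Set(ctx, "a", []byte("1"), time.Minute)
	_ = p.Set(ctx, "b", []byte("2"), time.Minute)

	// Touch "a" so that "b" becomes the least recently used entry.
	if _, ok := p.Get(ctx, "a"); !ok {
		t.Fatal("expected a to be present")
	}

	// Inserting a third key forces evictOne to drop "b".
	_ = p.Set(ctx, "c", []byte("3"), time.Minute)

	if p.Exists(ctx, "b") {
		t.Error("expected b to be evicted")
	}
	if !p.Exists(ctx, "a") || !p.Exists(ctx, "c") {
		t.Error("expected a and c to remain")
	}
}
```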

pkg/cache/provider_redis.go (vendored, new file, +185 lines)

package cache

import (
	"context"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

// RedisProvider is a Redis implementation of the Provider interface.
type RedisProvider struct {
	client  *redis.Client
	options *Options
}

// RedisConfig contains Redis-specific configuration.
type RedisConfig struct {
	// Host is the Redis server host (default: localhost)
	Host string

	// Port is the Redis server port (default: 6379)
	Port int

	// Password for Redis authentication (optional)
	Password string

	// DB is the Redis database number (default: 0)
	DB int

	// PoolSize is the maximum number of connections (default: 10)
	PoolSize int

	// Options contains general cache options
	Options *Options
}

// NewRedisProvider creates a new Redis cache provider.
func NewRedisProvider(config *RedisConfig) (*RedisProvider, error) {
	if config == nil {
		config = &RedisConfig{
			Host: "localhost",
			Port: 6379,
			DB:   0,
		}
	}

	if config.Host == "" {
		config.Host = "localhost"
	}
	if config.Port == 0 {
		config.Port = 6379
	}
	if config.PoolSize == 0 {
		config.PoolSize = 10
	}

	if config.Options == nil {
		config.Options = &Options{
			DefaultTTL: 5 * time.Minute,
		}
	}

	client := redis.NewClient(&redis.Options{
		Addr:     fmt.Sprintf("%s:%d", config.Host, config.Port),
		Password: config.Password,
		DB:       config.DB,
		PoolSize: config.PoolSize,
	})

	// Test connection
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	if err := client.Ping(ctx).Err(); err != nil {
		return nil, fmt.Errorf("failed to connect to Redis: %w", err)
	}

	return &RedisProvider{
		client:  client,
		options: config.Options,
	}, nil
}

// Get retrieves a value from the cache by key.
func (r *RedisProvider) Get(ctx context.Context, key string) ([]byte, bool) {
	val, err := r.client.Get(ctx, key).Bytes()
	if err == redis.Nil {
		return nil, false
	}
	if err != nil {
		return nil, false
	}
	return val, true
}

// Set stores a value in the cache with the specified TTL.
func (r *RedisProvider) Set(ctx context.Context, key string, value []byte, ttl time.Duration) error {
	if ttl == 0 {
		ttl = r.options.DefaultTTL
	}

	return r.client.Set(ctx, key, value, ttl).Err()
}

// Delete removes a key from the cache.
func (r *RedisProvider) Delete(ctx context.Context, key string) error {
	return r.client.Del(ctx, key).Err()
}

// DeleteByPattern removes all keys matching the pattern.
func (r *RedisProvider) DeleteByPattern(ctx context.Context, pattern string) error {
	iter := r.client.Scan(ctx, 0, pattern, 0).Iterator()
	pipe := r.client.Pipeline()

	count := 0
	for iter.Next(ctx) {
		pipe.Del(ctx, iter.Val())
		count++

		// Execute pipeline in batches of 100
		if count%100 == 0 {
			if _, err := pipe.Exec(ctx); err != nil {
				return err
			}
			pipe = r.client.Pipeline()
		}
	}

	if err := iter.Err(); err != nil {
		return err
	}

	// Execute remaining commands
	if count%100 != 0 {
		_, err := pipe.Exec(ctx)
		return err
	}

	return nil
}

// Clear removes all items from the cache.
func (r *RedisProvider) Clear(ctx context.Context) error {
	return r.client.FlushDB(ctx).Err()
}

// Exists checks if a key exists in the cache.
func (r *RedisProvider) Exists(ctx context.Context, key string) bool {
	result, err := r.client.Exists(ctx, key).Result()
	if err != nil {
		return false
	}
	return result > 0
}

// Close closes the provider and releases any resources.
func (r *RedisProvider) Close() error {
	return r.client.Close()
}

// Stats returns statistics about the cache provider.
func (r *RedisProvider) Stats(ctx context.Context) (*CacheStats, error) {
	info, err := r.client.Info(ctx, "stats", "keyspace").Result()
	if err != nil {
		return nil, fmt.Errorf("failed to get Redis stats: %w", err)
	}

	dbSize, err := r.client.DBSize(ctx).Result()
	if err != nil {
		return nil, fmt.Errorf("failed to get DB size: %w", err)
	}

	// Parse stats from the INFO command.
	// This is a simplified version - you may want to parse more detailed stats.
	stats := &CacheStats{
		Keys:         dbSize,
		ProviderType: "redis",
		ProviderStats: map[string]any{
			"info": info,
		},
	}

	return stats, nil
}

pkg/cache/query_cache.go (vendored, new file, +127 lines)

package cache

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/bitechdev/ResolveSpec/pkg/common"
)

// QueryCacheKey represents the components used to build a cache key for a query's total count.
type QueryCacheKey struct {
	TableName      string                `json:"table_name"`
	Filters        []common.FilterOption `json:"filters"`
	Sort           []common.SortOption   `json:"sort"`
	CustomSQLWhere string                `json:"custom_sql_where,omitempty"`
	CustomSQLOr    string                `json:"custom_sql_or,omitempty"`
	Expand         []ExpandOptionKey     `json:"expand,omitempty"`
	Distinct       bool                  `json:"distinct,omitempty"`
	CursorForward  string                `json:"cursor_forward,omitempty"`
	CursorBackward string                `json:"cursor_backward,omitempty"`
}

// ExpandOptionKey represents expand options for the cache key.
type ExpandOptionKey struct {
	Relation string `json:"relation"`
	Where    string `json:"where,omitempty"`
}

// BuildQueryCacheKey builds a cache key from query parameters for total count caching.
// It is used to cache the total count of records matching a query.
func BuildQueryCacheKey(tableName string, filters []common.FilterOption, sort []common.SortOption, customWhere, customOr string) string {
	key := QueryCacheKey{
		TableName:      tableName,
		Filters:        filters,
		Sort:           sort,
		CustomSQLWhere: customWhere,
		CustomSQLOr:    customOr,
	}

	// Serialize to JSON for consistent hashing
	jsonData, err := json.Marshal(key)
	if err != nil {
		// Fallback to simple string concatenation if JSON fails
		return hashString(fmt.Sprintf("%s_%v_%v_%s_%s", tableName, filters, sort, customWhere, customOr))
	}

	return hashString(string(jsonData))
}

// BuildExtendedQueryCacheKey builds a cache key for extended query options (restheadspec).
// It includes expand, distinct, and cursor pagination options.
func BuildExtendedQueryCacheKey(tableName string, filters []common.FilterOption, sort []common.SortOption,
	customWhere, customOr string, expandOpts []interface{}, distinct bool, cursorFwd, cursorBwd string) string {

	key := QueryCacheKey{
		TableName:      tableName,
		Filters:        filters,
		Sort:           sort,
		CustomSQLWhere: customWhere,
		CustomSQLOr:    customOr,
		Distinct:       distinct,
		CursorForward:  cursorFwd,
		CursorBackward: cursorBwd,
	}

	// Convert expand options to cache key format
	if len(expandOpts) > 0 {
		key.Expand = make([]ExpandOptionKey, 0, len(expandOpts))
		for _, exp := range expandOpts {
			// Type assert to get the expand option fields we care about for caching
			if expMap, ok := exp.(map[string]interface{}); ok {
				expKey := ExpandOptionKey{}
				if rel, ok := expMap["relation"].(string); ok {
					expKey.Relation = rel
				}
				if where, ok := expMap["where"].(string); ok {
					expKey.Where = where
				}
				key.Expand = append(key.Expand, expKey)
			}
		}
		// Expand options are hashed in the order they are provided, so callers
		// should pass them in a stable order to get consistent keys.
	}

	// Serialize to JSON for consistent hashing
	jsonData, err := json.Marshal(key)
	if err != nil {
		// Fallback to simple string concatenation if JSON fails
		return hashString(fmt.Sprintf("%s_%v_%v_%s_%s_%v_%v_%s_%s",
			tableName, filters, sort, customWhere, customOr, expandOpts, distinct, cursorFwd, cursorBwd))
	}

	return hashString(string(jsonData))
}

// hashString computes the SHA-256 hash of a string.
func hashString(s string) string {
	h := sha256.New()
	h.Write([]byte(s))
	return hex.EncodeToString(h.Sum(nil))
}

// GetQueryTotalCacheKey returns a formatted cache key for storing/retrieving a total count.
func GetQueryTotalCacheKey(hash string) string {
	return fmt.Sprintf("query_total:%s", hash)
}

// CachedTotal represents a cached total count.
type CachedTotal struct {
	Total int `json:"total"`
}

// InvalidateCacheForTable removes all cached totals for a specific table.
// This should be called when data in the table changes (insert/update/delete).
func InvalidateCacheForTable(ctx context.Context, tableName string) error {
	cache := GetDefaultCache()

	// Build a pattern to match all query totals for this table.
	// Note: this requires pattern matching support in the provider, and it only
	// matches keys that embed the table name; keys built with GetQueryTotalCacheKey
	// alone are opaque hashes, so those expire via TTL instead.
	pattern := fmt.Sprintf("query_total:*%s*", strings.ToLower(tableName))

	return cache.DeleteByPattern(ctx, pattern)
}
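
A sketch of how these pieces are intended to fit together when caching a query's total row count. The countRows helper is a hypothetical stand-in for whatever executes the actual COUNT query:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/bitechdev/ResolveSpec/pkg/cache"
	"github.com/bitechdev/ResolveSpec/pkg/common"
)

// countRows stands in for running SELECT COUNT(*) with the same filters.
func countRows(ctx context.Context, table string, filters []common.FilterOption) (int, error) {
	return 150, nil
}

func totalFor(ctx context.Context, table string, filters []common.FilterOption, sort []common.SortOption) (int, error) {
	c := cache.GetDefaultCache()
	key := cache.GetQueryTotalCacheKey(cache.BuildQueryCacheKey(table, filters, sort, "", ""))

	// Serve the cached total when present.
	var cached cache.CachedTotal
	if err := c.Get(ctx, key, &cached); err == nil {
		return cached.Total, nil
	}

	// Otherwise count, then cache the result for subsequent pages of the same query.
	total, err := countRows(ctx, table, filters)
	if err != nil {
		return 0, err
	}
	_ = c.Set(ctx, key, cache.CachedTotal{Total: total}, 5*time.Minute)
	return total, nil
}

func main() {
	ctx := context.Background()
	total, err := totalFor(ctx, "users",
		[]common.FilterOption{{Column: "status", Operator: "eq", Value: "active"}},
		[]common.SortOption{{Column: "created_at", Direction: "desc"}})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("total rows: %d", total)
}
```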

pkg/cache/query_cache_test.go (vendored, new file, +151 lines)

package cache

import (
	"context"
	"testing"
	"time"

	"github.com/bitechdev/ResolveSpec/pkg/common"
)

func TestBuildQueryCacheKey(t *testing.T) {
	filters := []common.FilterOption{
		{Column: "name", Operator: "eq", Value: "test"},
		{Column: "age", Operator: "gt", Value: 25},
	}
	sorts := []common.SortOption{
		{Column: "name", Direction: "asc"},
	}

	// Generate cache key
	key1 := BuildQueryCacheKey("users", filters, sorts, "status = 'active'", "")

	// Same parameters should generate same key
	key2 := BuildQueryCacheKey("users", filters, sorts, "status = 'active'", "")

	if key1 != key2 {
		t.Errorf("Expected same cache keys for identical parameters, got %s and %s", key1, key2)
	}

	// Different parameters should generate different key
	key3 := BuildQueryCacheKey("users", filters, sorts, "status = 'inactive'", "")

	if key1 == key3 {
		t.Errorf("Expected different cache keys for different parameters, got %s and %s", key1, key3)
	}
}

func TestBuildExtendedQueryCacheKey(t *testing.T) {
	filters := []common.FilterOption{
		{Column: "name", Operator: "eq", Value: "test"},
	}
	sorts := []common.SortOption{
		{Column: "name", Direction: "asc"},
	}
	expandOpts := []interface{}{
		map[string]interface{}{
			"relation": "posts",
			"where":    "status = 'published'",
		},
	}

	// Generate cache key
	key1 := BuildExtendedQueryCacheKey("users", filters, sorts, "", "", expandOpts, false, "", "")

	// Same parameters should generate same key
	key2 := BuildExtendedQueryCacheKey("users", filters, sorts, "", "", expandOpts, false, "", "")

	if key1 != key2 {
		t.Errorf("Expected same cache keys for identical parameters")
	}

	// Different distinct value should generate different key
	key3 := BuildExtendedQueryCacheKey("users", filters, sorts, "", "", expandOpts, true, "", "")

	if key1 == key3 {
		t.Errorf("Expected different cache keys for different distinct values")
	}
}

func TestGetQueryTotalCacheKey(t *testing.T) {
	hash := "abc123"
	key := GetQueryTotalCacheKey(hash)

	expected := "query_total:abc123"
	if key != expected {
		t.Errorf("Expected %s, got %s", expected, key)
	}
}

func TestCachedTotalIntegration(t *testing.T) {
	// Initialize cache with memory provider for testing
	UseMemory(&Options{
		DefaultTTL: 1 * time.Minute,
		MaxSize:    100,
	})

	ctx := context.Background()

	// Create test data
	filters := []common.FilterOption{
		{Column: "status", Operator: "eq", Value: "active"},
	}
	sorts := []common.SortOption{
		{Column: "created_at", Direction: "desc"},
	}

	// Build cache key
	cacheKeyHash := BuildQueryCacheKey("test_table", filters, sorts, "", "")
	cacheKey := GetQueryTotalCacheKey(cacheKeyHash)

	// Store a total count in cache
	totalToCache := CachedTotal{Total: 42}
	err := GetDefaultCache().Set(ctx, cacheKey, totalToCache, time.Minute)
	if err != nil {
		t.Fatalf("Failed to set cache: %v", err)
	}

	// Retrieve from cache
	var cachedTotal CachedTotal
	err = GetDefaultCache().Get(ctx, cacheKey, &cachedTotal)
	if err != nil {
		t.Fatalf("Failed to get from cache: %v", err)
	}

	if cachedTotal.Total != 42 {
		t.Errorf("Expected total 42, got %d", cachedTotal.Total)
	}

	// Test cache miss
	nonExistentKey := GetQueryTotalCacheKey("nonexistent")
	var missedTotal CachedTotal
	err = GetDefaultCache().Get(ctx, nonExistentKey, &missedTotal)
	if err == nil {
		t.Errorf("Expected error for cache miss, got nil")
	}
}

func TestHashString(t *testing.T) {
	input1 := "test string"
	input2 := "test string"
	input3 := "different string"

	hash1 := hashString(input1)
	hash2 := hashString(input2)
	hash3 := hashString(input3)

	// Same input should produce same hash
	if hash1 != hash2 {
		t.Errorf("Expected same hash for identical inputs")
	}

	// Different input should produce different hash
	if hash1 == hash3 {
		t.Errorf("Expected different hash for different inputs")
	}

	// Hash should be hex-encoded SHA-256 (64 characters)
	if len(hash1) != 64 {
		t.Errorf("Expected hash length of 64, got %d", len(hash1))
	}
}