package queue

import (
	"context"
	"fmt"
	"sync"

	"git.warky.dev/wdevs/pgsql-broker/pkg/broker/adapter"
	"git.warky.dev/wdevs/pgsql-broker/pkg/broker/models"
	"git.warky.dev/wdevs/pgsql-broker/pkg/broker/worker"
)

// Queue manages a collection of workers for a specific queue number.
type Queue struct {
	Number     int
	InstanceID int64

	workers     []*worker.Worker
	db          adapter.DBAdapter
	logger      adapter.Logger
	ctx         context.Context
	cancel      context.CancelFunc
	mu          sync.RWMutex // guards workers
	workerCount int
}

// Config holds queue configuration.
type Config struct {
	Number      int               // queue number to manage
	InstanceID  int64             // identifier of the owning broker instance
	WorkerCount int               // number of workers to create in Start
	DBAdapter   adapter.DBAdapter // database access shared by all workers
	Logger      adapter.Logger    // base logger; New scopes it with the queue number

	// BufferSize, TimerSeconds, and FetchSize are forwarded unchanged to
	// each worker's configuration.
	BufferSize   int
	TimerSeconds int
	FetchSize    int
}

// New creates a new queue manager.
func New(cfg Config) *Queue {
	ctx, cancel := context.WithCancel(context.Background())

	logger := cfg.Logger.With("queue", cfg.Number)

	return &Queue{
		Number:      cfg.Number,
		InstanceID:  cfg.InstanceID,
		workers:     make([]*worker.Worker, 0, cfg.WorkerCount),
		db:          cfg.DBAdapter,
		logger:      logger,
		ctx:         ctx,
		cancel:      cancel,
		workerCount: cfg.WorkerCount,
	}
}

// Start initializes and starts all workers in the queue.
func (q *Queue) Start(cfg Config) error {
	q.mu.Lock()
	defer q.mu.Unlock()

	q.logger.Info("starting queue", "worker_count", q.workerCount)

	for i := 0; i < q.workerCount; i++ {
		w := worker.New(worker.Config{
			ID:           i + 1,
			QueueNumber:  q.Number,
			InstanceID:   q.InstanceID,
			DBAdapter:    cfg.DBAdapter,
			Logger:       cfg.Logger,
			BufferSize:   cfg.BufferSize,
			TimerSeconds: cfg.TimerSeconds,
			FetchSize:    cfg.FetchSize,
		})

		if err := w.Start(q.ctx); err != nil {
			return fmt.Errorf("failed to start worker %d: %w", i+1, err)
		}

		q.workers = append(q.workers, w)
	}

	q.logger.Info("queue started successfully", "workers", len(q.workers))
	return nil
}

// Stop gracefully stops all workers in the queue.
func (q *Queue) Stop() error {
	q.mu.Lock()
	defer q.mu.Unlock()

	q.logger.Info("stopping queue")

	// Cancel the shared context to signal all workers.
	q.cancel()

	// Stop each worker and collect any errors.
	var stopErrors []error
	for i, w := range q.workers {
		if err := w.Stop(); err != nil {
			stopErrors = append(stopErrors, fmt.Errorf("worker %d: %w", i+1, err))
		}
	}

	if len(stopErrors) > 0 {
		return fmt.Errorf("errors stopping workers: %v", stopErrors)
	}

	q.logger.Info("queue stopped successfully")
	return nil
}

// AddJob dispatches a job to the first worker that will accept it.
func (q *Queue) AddJob(job models.Job) error {
	q.mu.RLock()
	defer q.mu.RUnlock()

	if len(q.workers) == 0 {
		return fmt.Errorf("no workers available")
	}

	// Try workers in order and hand the job to the first one that accepts
	// it. This could be enhanced with real load balancing; see the sketch
	// below.
	for _, w := range q.workers {
		if err := w.AddJob(job); err == nil {
			return nil
		}
	}

	return fmt.Errorf("all workers are busy")
}

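// A possible refinement of the dispatch loop above (illustrative sketch
// only, not part of the current API): instead of taking the first worker
// that accepts, pick the one with the fewest handled jobs as a rough
// proxy for load. This assumes worker.GetStats returns
// (lastActivity, jobsHandled, running), matching its use in GetStats below.
//
//	best := q.workers[0]
//	_, bestHandled, _ := best.GetStats()
//	for _, w := range q.workers[1:] {
//	    _, handled, _ := w.GetStats()
//	    if handled < bestHandled {
//	        best, bestHandled = w, handled
//	    }
//	}
//	return best.AddJob(job)
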
// GetStats returns statistics for all workers in the queue, keyed by
// worker ID (1-based, matching the IDs assigned in Start).
func (q *Queue) GetStats() map[int]worker.Stats {
	q.mu.RLock()
	defer q.mu.RUnlock()

	stats := make(map[int]worker.Stats)
	for i, w := range q.workers {
		lastActivity, jobsHandled, running := w.GetStats()
		stats[i+1] = worker.Stats{
			LastActivity: lastActivity,
			JobsHandled:  jobsHandled,
			Running:      running,
		}
	}

	return stats
}
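
// Example wiring from a consuming package (illustrative sketch; db and
// log stand for concrete adapter.DBAdapter and adapter.Logger
// implementations, and the numeric values are placeholders, not
// recommended defaults):
//
//	cfg := queue.Config{
//	    Number:       1,
//	    InstanceID:   1,
//	    WorkerCount:  4,
//	    DBAdapter:    db,
//	    Logger:       log,
//	    BufferSize:   100,
//	    TimerSeconds: 5,
//	    FetchSize:    10,
//	}
//
//	q := queue.New(cfg)
//	if err := q.Start(cfg); err != nil {
//	    return err
//	}
//	defer q.Stop()
//
//	// job is a models.Job produced elsewhere.
//	if err := q.AddJob(job); err != nil {
//	    return err
//	}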