Mirror of https://github.com/bitechdev/ResolveSpec.git, synced 2025-12-29 15:54:26 +00:00
Compare commits
16 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | d4a6f9c4c2 |  |
|  | 8f83e8fdc1 |  |
|  | ed67caf055 |  |
|  | 4d1b8b6982 |  |
|  | 63ed62a9a3 |  |
|  | 0525323a47 |  |
|  | c3443f702e |  |
|  | 45c463c117 |  |
|  | 84d673ce14 |  |
|  | 02fbdbd651 |  |
|  | 97988e3b5e |  |
|  | c9838ad9d2 |  |
|  | c5c0608f63 |  |
|  | 39c3f05d21 |  |
|  | 4ecd1ac17e |  |
|  | 2b1aea0338 |  |
Makefile (6 changed lines)
@@ -16,7 +16,7 @@ test: test-unit test-integration
# Start PostgreSQL for integration tests
docker-up:
	@echo "Starting PostgreSQL container..."
	@docker-compose up -d postgres-test
	@podman compose up -d postgres-test
	@echo "Waiting for PostgreSQL to be ready..."
	@sleep 5
	@echo "PostgreSQL is ready!"
@@ -24,12 +24,12 @@ docker-up:
# Stop PostgreSQL container
docker-down:
	@echo "Stopping PostgreSQL container..."
	@docker-compose down
	@podman compose down

# Clean up Docker volumes and test data
clean:
	@echo "Cleaning up..."
	@docker-compose down -v
	@podman compose down -v
	@echo "Cleanup complete!"

# Run integration tests with Docker (full workflow)
go.mod (68 changed lines)
@@ -11,15 +11,17 @@ require (
|
||||
github.com/glebarez/sqlite v1.11.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/jackc/pgx/v5 v5.6.0
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/redis/go-redis/v9 v9.17.1
|
||||
github.com/spf13/viper v1.21.0
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/testcontainers/testcontainers-go v0.40.0
|
||||
github.com/tidwall/gjson v1.18.0
|
||||
github.com/tidwall/sjson v1.2.5
|
||||
github.com/uptrace/bun v1.2.15
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.2.15
|
||||
github.com/uptrace/bun/driver/sqliteshim v1.2.15
|
||||
github.com/uptrace/bun v1.2.16
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.2.16
|
||||
github.com/uptrace/bun/driver/sqliteshim v1.2.16
|
||||
github.com/uptrace/bunrouter v1.0.23
|
||||
go.opentelemetry.io/otel v1.38.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0
|
||||
@@ -33,36 +35,68 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
dario.cat/mergo v1.0.2 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/cpuguy83/dockercfg v0.3.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/docker v28.5.1+incompatible // indirect
|
||||
github.com/docker/go-connections v0.6.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/ebitengine/purego v0.8.4 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/glebarez/go-sqlite v1.21.2 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
github.com/jackc/pgx/v5 v5.6.0 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/magiconair/properties v1.8.10 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.28 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.32 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/go-archive v0.1.0 // indirect
|
||||
github.com/moby/patternmatcher v0.6.0 // indirect
|
||||
github.com/moby/sys/sequential v0.6.0 // indirect
|
||||
github.com/moby/sys/user v0.4.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/ncruces/go-strftime v0.1.9 // indirect
|
||||
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.66.1 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/sagikazarmark/locafero v0.11.0 // indirect
|
||||
github.com/shirou/gopsutil/v4 v4.25.6 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
|
||||
github.com/spf13/afero v1.15.0 // indirect
|
||||
github.com/spf13/cast v1.10.0 // indirect
|
||||
@@ -70,28 +104,34 @@ require (
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
|
||||
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.38.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
|
||||
go.uber.org/multierr v1.10.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/crypto v0.41.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc // indirect
|
||||
golang.org/x/net v0.43.0 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
golang.org/x/sys v0.35.0 // indirect
|
||||
golang.org/x/text v0.28.0 // indirect
|
||||
golang.org/x/crypto v0.43.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/text v0.30.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
|
||||
google.golang.org/grpc v1.75.0 // indirect
|
||||
google.golang.org/protobuf v1.36.8 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
modernc.org/libc v1.66.3 // indirect
|
||||
modernc.org/libc v1.67.0 // indirect
|
||||
modernc.org/mathutil v1.7.1 // indirect
|
||||
modernc.org/memory v1.11.0 // indirect
|
||||
modernc.org/sqlite v1.38.0 // indirect
|
||||
modernc.org/sqlite v1.40.1 // indirect
|
||||
)
|
||||
|
||||
replace github.com/uptrace/bun => github.com/warkanum/bun v1.2.17
|
||||
|
||||
go.sum (170 changed lines)
@@ -1,5 +1,13 @@
|
||||
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
|
||||
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bradfitz/gomemcache v0.0.0-20250403215159-8d39553ac7cf h1:TqhNAT4zKbTdLa62d2HDBFdvgSbIGB3eJE8HqhgiL9I=
|
||||
@@ -8,17 +16,43 @@ github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
|
||||
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
|
||||
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
|
||||
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
|
||||
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
|
||||
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
|
||||
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
|
||||
github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
|
||||
github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM=
|
||||
github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
|
||||
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
|
||||
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
@@ -36,10 +70,13 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
|
||||
@@ -50,6 +87,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
@@ -71,14 +110,40 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE=
|
||||
github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A=
|
||||
github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
|
||||
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
|
||||
github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo=
|
||||
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
|
||||
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
|
||||
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
|
||||
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
|
||||
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
|
||||
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
|
||||
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
|
||||
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
|
||||
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||
@@ -87,6 +152,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
@@ -105,6 +172,10 @@ github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
|
||||
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
|
||||
github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs=
|
||||
github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
|
||||
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
|
||||
@@ -116,12 +187,16 @@ github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3A
|
||||
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
|
||||
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/testcontainers/testcontainers-go v0.40.0 h1:pSdJYLOVgLE8YdUY2FHQ1Fxu+aMnb6JfVz1mxk7OeMU=
|
||||
github.com/testcontainers/testcontainers-go v0.40.0/go.mod h1:FSXV5KQtX2HAMlm7U3APNyLkkap35zNLxukw9oBi/MY=
|
||||
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
|
||||
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
@@ -131,28 +206,38 @@ github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
|
||||
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
|
||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo=
|
||||
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs=
|
||||
github.com/uptrace/bun v1.2.15 h1:Ut68XRBLDgp9qG9QBMa9ELWaZOmzHNdczHQdrOZbEFE=
|
||||
github.com/uptrace/bun v1.2.15/go.mod h1:Eghz7NonZMiTX/Z6oKYytJ0oaMEJ/eq3kEV4vSqG038=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.2.15 h1:7upGMVjFRB1oI78GQw6ruNLblYn5CR+kxqcbbeBBils=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.2.15/go.mod h1:c7YIDaPNS2CU2uI1p7umFuFWkuKbDcPDDvp+DLHZnkI=
|
||||
github.com/uptrace/bun/driver/sqliteshim v1.2.15 h1:M/rZJSjOPV4OmfTVnDPtL+wJmdMTqDUn8cuk5ycfABA=
|
||||
github.com/uptrace/bun/driver/sqliteshim v1.2.15/go.mod h1:YqwxFyvM992XOCpGJtXyKPkgkb+aZpIIMzGbpaw1hIk=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.2.16 h1:6wVAiYLj1pMibRthGwy4wDLa3D5AQo32Y8rvwPd8CQ0=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.2.16/go.mod h1:Z7+5qK8CGZkDQiPMu+LSdVuDuR1I5jcwtkB1Pi3F82E=
|
||||
github.com/uptrace/bun/driver/sqliteshim v1.2.16 h1:M6Dh5kkDWFbUWBrOsIE1g1zdZ5JbSytTD4piFRBOUAI=
|
||||
github.com/uptrace/bun/driver/sqliteshim v1.2.16/go.mod h1:iKdJ06P3XS+pwKcONjSIK07bbhksH3lWsw3mpfr0+bY=
|
||||
github.com/uptrace/bunrouter v1.0.23 h1:Bi7NKw3uCQkcA/GUCtDNPq5LE5UdR9pe+UyWbjHB/wU=
|
||||
github.com/uptrace/bunrouter v1.0.23/go.mod h1:O3jAcl+5qgnF+ejhgkmbceEk0E/mqaK+ADOocdNpY8M=
|
||||
github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
|
||||
github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
|
||||
github.com/warkanum/bun v1.2.17 h1:HP8eTuKSNcqMDhhIPFxEbgV/yct6RR0/c3qHH3PNZUA=
|
||||
github.com/warkanum/bun v1.2.17/go.mod h1:jMoNg2n56ckaawi/O/J92BHaECmrz6IRjuMWqlMaMTM=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
|
||||
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
|
||||
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
|
||||
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
|
||||
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
|
||||
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
|
||||
@@ -173,25 +258,34 @@ go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
|
||||
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
|
||||
golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc h1:TS73t7x3KarrNd5qAipmspBDS1rkMcgVG/fS1aRb4Rc=
|
||||
golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
|
||||
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
|
||||
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
|
||||
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
|
||||
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
|
||||
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 h1:zfMcR1Cs4KNuomFFgGefv5N0czO2XZpUbxGUy8i8ug0=
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
|
||||
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
||||
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
|
||||
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
|
||||
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
|
||||
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
|
||||
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
|
||||
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
|
||||
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
|
||||
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
|
||||
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
|
||||
@@ -212,18 +306,22 @@ gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
|
||||
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
|
||||
gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8=
|
||||
gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
|
||||
modernc.org/cc/v4 v4.26.2 h1:991HMkLjJzYBIfha6ECZdjrIYz2/1ayr+FL8GN+CNzM=
|
||||
modernc.org/cc/v4 v4.26.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.28.0 h1:rjznn6WWehKq7dG4JtLRKxb52Ecv8OUGah8+Z/SfpNU=
|
||||
modernc.org/ccgo/v4 v4.28.0/go.mod h1:JygV3+9AV6SmPhDasu4JgquwU81XAKLd3OKTUDNOiKE=
|
||||
modernc.org/fileutil v1.3.8 h1:qtzNm7ED75pd1C7WgAGcK4edm4fvhtBsEiI/0NQ54YM=
|
||||
modernc.org/fileutil v1.3.8/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
||||
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
|
||||
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
|
||||
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
||||
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
|
||||
modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
|
||||
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
|
||||
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||
modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
|
||||
modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
||||
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
||||
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
||||
modernc.org/libc v1.66.3 h1:cfCbjTUcdsKyyZZfEUKfoHcP3S0Wkvz3jgSzByEWVCQ=
|
||||
modernc.org/libc v1.66.3/go.mod h1:XD9zO8kt59cANKvHPXpx7yS2ELPheAey0vjIuZOhOU8=
|
||||
modernc.org/libc v1.67.0 h1:QzL4IrKab2OFmxA3/vRYl0tLXrIamwrhD6CKD4WBVjQ=
|
||||
modernc.org/libc v1.67.0/go.mod h1:QvvnnJ5P7aitu0ReNpVIEyesuhmDLQ8kaEoyMjIFZJA=
|
||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||
@@ -232,8 +330,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.38.0 h1:+4OrfPQ8pxHKuWG4md1JpR/EYAh3Md7TdejuuzE7EUI=
|
||||
modernc.org/sqlite v1.38.0/go.mod h1:1Bj+yES4SVvBZ4cBOpVZ6QgesMCKpJZDq0nxYzOpmNE=
|
||||
modernc.org/sqlite v1.40.1 h1:VfuXcxcUWWKRBuP8+BR9L7VnmusMgBNNnBYGEe9w/iY=
|
||||
modernc.org/sqlite v1.40.1/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
|
||||
pkg/cache/cache_manager.go (vendored, 20 changed lines)
@@ -57,11 +57,31 @@ func (c *Cache) SetBytes(ctx context.Context, key string, value []byte, ttl time
	return c.provider.Set(ctx, key, value, ttl)
}

// SetWithTags serializes and stores a value in the cache with the specified TTL and tags.
func (c *Cache) SetWithTags(ctx context.Context, key string, value interface{}, ttl time.Duration, tags []string) error {
	data, err := json.Marshal(value)
	if err != nil {
		return fmt.Errorf("failed to serialize: %w", err)
	}

	return c.provider.SetWithTags(ctx, key, data, ttl, tags)
}

// SetBytesWithTags stores raw bytes in the cache with the specified TTL and tags.
func (c *Cache) SetBytesWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error {
	return c.provider.SetWithTags(ctx, key, value, ttl, tags)
}

// Delete removes a key from the cache.
func (c *Cache) Delete(ctx context.Context, key string) error {
	return c.provider.Delete(ctx, key)
}

// DeleteByTag removes all keys associated with the given tag.
func (c *Cache) DeleteByTag(ctx context.Context, tag string) error {
	return c.provider.DeleteByTag(ctx, tag)
}

// DeleteByPattern removes all keys matching the pattern.
func (c *Cache) DeleteByPattern(ctx context.Context, pattern string) error {
	return c.provider.DeleteByPattern(ctx, pattern)
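Taken together, these wrappers give callers a tag-aware invalidation API on top of whichever provider is configured. A minimal usage sketch against the methods shown above; the key and tag names are invented for illustration, and the UseMemory/GetDefaultCache initialization mirrors the (now deleted) test file further down in this comparison:

```go
package main

import (
	"context"
	"time"

	"github.com/bitechdev/ResolveSpec/pkg/cache"
)

func main() {
	ctx := context.Background()

	// Configure the in-memory provider as the default cache.
	cache.UseMemory(&cache.Options{DefaultTTL: time.Minute, MaxSize: 1000})
	c := cache.GetDefaultCache()

	// Store a serialized value and tag it with the table it came from.
	_ = c.SetWithTags(ctx, "query:users:active", []string{"alice", "bob"},
		5*time.Minute, []string{"table:users"})

	// When the users table changes, drop every entry carrying that tag.
	_ = c.DeleteByTag(ctx, "table:users")
}
```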
pkg/cache/provider.go (vendored, 8 changed lines)
@@ -15,9 +15,17 @@ type Provider interface {
	// If ttl is 0, the item never expires.
	Set(ctx context.Context, key string, value []byte, ttl time.Duration) error

	// SetWithTags stores a value in the cache with the specified TTL and tags.
	// Tags can be used to invalidate groups of related keys.
	// If ttl is 0, the item never expires.
	SetWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error

	// Delete removes a key from the cache.
	Delete(ctx context.Context, key string) error

	// DeleteByTag removes all keys associated with the given tag.
	DeleteByTag(ctx context.Context, tag string) error

	// DeleteByPattern removes all keys matching the pattern.
	// Pattern syntax depends on the provider implementation.
	DeleteByPattern(ctx context.Context, pattern string) error
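Since every provider must now implement the extra tag methods, a provider that falls behind is easiest to catch with compile-time interface assertions. A small sketch; where such assertions would live in this package is an assumption, but the provider type names match the files changed in this comparison:

```go
package cache

// Compile-time checks that each provider still satisfies Provider after the
// SetWithTags/DeleteByTag/DeleteByPattern additions.
var (
	_ Provider = (*MemoryProvider)(nil)
	_ Provider = (*MemcacheProvider)(nil)
	_ Provider = (*RedisProvider)(nil)
)
```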
pkg/cache/provider_memcache.go (vendored, 140 changed lines)
@@ -2,6 +2,7 @@ package cache

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

@@ -97,8 +98,115 @@ func (m *MemcacheProvider) Set(ctx context.Context, key string, value []byte, tt
	return m.client.Set(item)
}

// SetWithTags stores a value in the cache with the specified TTL and tags.
// Note: Tag support in Memcache is limited and less efficient than Redis.
func (m *MemcacheProvider) SetWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error {
	if ttl == 0 {
		ttl = m.options.DefaultTTL
	}

	expiration := int32(ttl.Seconds())

	// Set the main value
	item := &memcache.Item{
		Key:        key,
		Value:      value,
		Expiration: expiration,
	}
	if err := m.client.Set(item); err != nil {
		return err
	}

	// Store tags for this key
	if len(tags) > 0 {
		tagsData, err := json.Marshal(tags)
		if err != nil {
			return fmt.Errorf("failed to marshal tags: %w", err)
		}

		tagsItem := &memcache.Item{
			Key:        fmt.Sprintf("cache:tags:%s", key),
			Value:      tagsData,
			Expiration: expiration,
		}
		if err := m.client.Set(tagsItem); err != nil {
			return err
		}

		// Add key to each tag's key list
		for _, tag := range tags {
			tagKey := fmt.Sprintf("cache:tag:%s", tag)

			// Get existing keys for this tag
			var keys []string
			if item, err := m.client.Get(tagKey); err == nil {
				_ = json.Unmarshal(item.Value, &keys)
			}

			// Add current key if not already present
			found := false
			for _, k := range keys {
				if k == key {
					found = true
					break
				}
			}
			if !found {
				keys = append(keys, key)
			}

			// Store updated key list
			keysData, err := json.Marshal(keys)
			if err != nil {
				continue
			}

			tagItem := &memcache.Item{
				Key:        tagKey,
				Value:      keysData,
				Expiration: expiration + 3600, // Give tag lists longer TTL
			}
			_ = m.client.Set(tagItem)
		}
	}

	return nil
}

// Delete removes a key from the cache.
func (m *MemcacheProvider) Delete(ctx context.Context, key string) error {
	// Get tags for this key
	tagsKey := fmt.Sprintf("cache:tags:%s", key)
	if item, err := m.client.Get(tagsKey); err == nil {
		var tags []string
		if err := json.Unmarshal(item.Value, &tags); err == nil {
			// Remove key from each tag's key list
			for _, tag := range tags {
				tagKey := fmt.Sprintf("cache:tag:%s", tag)
				if tagItem, err := m.client.Get(tagKey); err == nil {
					var keys []string
					if err := json.Unmarshal(tagItem.Value, &keys); err == nil {
						// Remove current key from the list
						newKeys := make([]string, 0, len(keys))
						for _, k := range keys {
							if k != key {
								newKeys = append(newKeys, k)
							}
						}
						// Update the tag's key list
						if keysData, err := json.Marshal(newKeys); err == nil {
							tagItem.Value = keysData
							_ = m.client.Set(tagItem)
						}
					}
				}
			}
		}
		// Delete the tags key
		_ = m.client.Delete(tagsKey)
	}

	// Delete the actual key
	err := m.client.Delete(key)
	if err == memcache.ErrCacheMiss {
		return nil
@@ -106,6 +214,38 @@ func (m *MemcacheProvider) Delete(ctx context.Context, key string) error {
	return err
}

// DeleteByTag removes all keys associated with the given tag.
func (m *MemcacheProvider) DeleteByTag(ctx context.Context, tag string) error {
	tagKey := fmt.Sprintf("cache:tag:%s", tag)

	// Get all keys associated with this tag
	item, err := m.client.Get(tagKey)
	if err == memcache.ErrCacheMiss {
		return nil
	}
	if err != nil {
		return err
	}

	var keys []string
	if err := json.Unmarshal(item.Value, &keys); err != nil {
		return fmt.Errorf("failed to unmarshal tag keys: %w", err)
	}

	// Delete all keys
	for _, key := range keys {
		_ = m.client.Delete(key)
		// Also delete the tags key for this cache key
		tagsKey := fmt.Sprintf("cache:tags:%s", key)
		_ = m.client.Delete(tagsKey)
	}

	// Delete the tag key itself
	_ = m.client.Delete(tagKey)

	return nil
}

// DeleteByPattern removes all keys matching the pattern.
// Note: Memcache does not support pattern-based deletion natively.
// This is a no-op for memcache and returns an error.
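Memcache has no native secondary indexes, so the code above emulates tags with two JSON-encoded bookkeeping entries per key; note that the get/append/set cycle on the per-tag key list is not atomic, so concurrent writers can in principle drop entries from it. A sketch of the key layout only; these helper functions are illustrative and not part of the package:

```go
package cache

import "fmt"

// Illustration of the convention used by the memcache provider above:
//   cache:tag:<tag>   holds a JSON array of cache keys carrying that tag
//   cache:tags:<key>  holds a JSON array of tags attached to that key
func tagIndexKey(tag string) string { return fmt.Sprintf("cache:tag:%s", tag) }

func keyTagsKey(key string) string { return fmt.Sprintf("cache:tags:%s", key) }
```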
pkg/cache/provider_memory.go (vendored, 118 changed lines)
@@ -15,6 +15,7 @@ type memoryItem struct {
	Expiration time.Time
	LastAccess time.Time
	HitCount   int64
	Tags       []string
}

// isExpired checks if the item has expired.
@@ -27,11 +28,12 @@ func (m *memoryItem) isExpired() bool {

// MemoryProvider is an in-memory implementation of the Provider interface.
type MemoryProvider struct {
	mu      sync.RWMutex
	items   map[string]*memoryItem
	options *Options
	hits    atomic.Int64
	misses  atomic.Int64
	mu        sync.RWMutex
	items     map[string]*memoryItem
	tagToKeys map[string]map[string]struct{} // tag -> set of keys
	options   *Options
	hits      atomic.Int64
	misses    atomic.Int64
}

// NewMemoryProvider creates a new in-memory cache provider.
@@ -44,8 +46,9 @@ func NewMemoryProvider(opts *Options) *MemoryProvider {
	}

	return &MemoryProvider{
		items:   make(map[string]*memoryItem),
		options: opts,
		items:     make(map[string]*memoryItem),
		tagToKeys: make(map[string]map[string]struct{}),
		options:   opts,
	}
}

@@ -114,15 +117,116 @@ func (m *MemoryProvider) Set(ctx context.Context, key string, value []byte, ttl
	return nil
}

// SetWithTags stores a value in the cache with the specified TTL and tags.
func (m *MemoryProvider) SetWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	if ttl == 0 {
		ttl = m.options.DefaultTTL
	}

	var expiration time.Time
	if ttl > 0 {
		expiration = time.Now().Add(ttl)
	}

	// Check max size and evict if necessary
	if m.options.MaxSize > 0 && len(m.items) >= m.options.MaxSize {
		if _, exists := m.items[key]; !exists {
			m.evictOne()
		}
	}

	// Remove old tag associations if key exists
	if oldItem, exists := m.items[key]; exists {
		for _, tag := range oldItem.Tags {
			if keySet, ok := m.tagToKeys[tag]; ok {
				delete(keySet, key)
				if len(keySet) == 0 {
					delete(m.tagToKeys, tag)
				}
			}
		}
	}

	// Store the item
	m.items[key] = &memoryItem{
		Value:      value,
		Expiration: expiration,
		LastAccess: time.Now(),
		Tags:       tags,
	}

	// Add new tag associations
	for _, tag := range tags {
		if m.tagToKeys[tag] == nil {
			m.tagToKeys[tag] = make(map[string]struct{})
		}
		m.tagToKeys[tag][key] = struct{}{}
	}

	return nil
}

// Delete removes a key from the cache.
func (m *MemoryProvider) Delete(ctx context.Context, key string) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Remove tag associations
	if item, exists := m.items[key]; exists {
		for _, tag := range item.Tags {
			if keySet, ok := m.tagToKeys[tag]; ok {
				delete(keySet, key)
				if len(keySet) == 0 {
					delete(m.tagToKeys, tag)
				}
			}
		}
	}

	delete(m.items, key)
	return nil
}

// DeleteByTag removes all keys associated with the given tag.
func (m *MemoryProvider) DeleteByTag(ctx context.Context, tag string) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Get all keys associated with this tag
	keySet, exists := m.tagToKeys[tag]
	if !exists {
		return nil // No keys with this tag
	}

	// Delete all items with this tag
	for key := range keySet {
		if item, ok := m.items[key]; ok {
			// Remove this tag from the item's tag list
			newTags := make([]string, 0, len(item.Tags))
			for _, t := range item.Tags {
				if t != tag {
					newTags = append(newTags, t)
				}
			}

			// If item has no more tags, delete it
			// Otherwise update its tags
			if len(newTags) == 0 {
				delete(m.items, key)
			} else {
				item.Tags = newTags
			}
		}
	}

	// Remove the tag mapping
	delete(m.tagToKeys, tag)
	return nil
}

// DeleteByPattern removes all keys matching the pattern.
func (m *MemoryProvider) DeleteByPattern(ctx context.Context, pattern string) error {
	m.mu.Lock()
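The in-memory provider keeps a reverse index (tagToKeys) under the same mutex as the items map, so tag invalidation is a plain map walk. A sketch of a unit test for that behaviour; it assumes the provider's existing Get method has the shape Get(ctx, key) ([]byte, error) and returns an error on a miss, which this hunk does not show:

```go
package cache

import (
	"context"
	"testing"
	"time"
)

func TestMemoryProviderDeleteByTag(t *testing.T) {
	ctx := context.Background()
	p := NewMemoryProvider(&Options{DefaultTTL: time.Minute, MaxSize: 10})

	_ = p.SetWithTags(ctx, "user:1", []byte(`{"id":1}`), time.Minute, []string{"table:users"})
	_ = p.SetWithTags(ctx, "user:2", []byte(`{"id":2}`), time.Minute, []string{"table:users"})

	if err := p.DeleteByTag(ctx, "table:users"); err != nil {
		t.Fatalf("DeleteByTag: %v", err)
	}

	// Both entries only carried the deleted tag, so they should be gone.
	if _, err := p.Get(ctx, "user:1"); err == nil {
		t.Errorf("expected user:1 to be evicted after DeleteByTag")
	}
}
```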
pkg/cache/provider_redis.go (vendored, 86 changed lines)
@@ -103,9 +103,93 @@ func (r *RedisProvider) Set(ctx context.Context, key string, value []byte, ttl t
	return r.client.Set(ctx, key, value, ttl).Err()
}

// SetWithTags stores a value in the cache with the specified TTL and tags.
func (r *RedisProvider) SetWithTags(ctx context.Context, key string, value []byte, ttl time.Duration, tags []string) error {
	if ttl == 0 {
		ttl = r.options.DefaultTTL
	}

	pipe := r.client.Pipeline()

	// Set the value
	pipe.Set(ctx, key, value, ttl)

	// Add key to each tag's set
	for _, tag := range tags {
		tagKey := fmt.Sprintf("cache:tag:%s", tag)
		pipe.SAdd(ctx, tagKey, key)
		// Set expiration on tag set (longer than cache items to ensure cleanup)
		if ttl > 0 {
			pipe.Expire(ctx, tagKey, ttl+time.Hour)
		}
	}

	// Store tags for this key for later cleanup
	if len(tags) > 0 {
		tagsKey := fmt.Sprintf("cache:tags:%s", key)
		pipe.SAdd(ctx, tagsKey, tags)
		if ttl > 0 {
			pipe.Expire(ctx, tagsKey, ttl)
		}
	}

	_, err := pipe.Exec(ctx)
	return err
}

// Delete removes a key from the cache.
func (r *RedisProvider) Delete(ctx context.Context, key string) error {
	return r.client.Del(ctx, key).Err()
	pipe := r.client.Pipeline()

	// Get tags for this key
	tagsKey := fmt.Sprintf("cache:tags:%s", key)
	tags, err := r.client.SMembers(ctx, tagsKey).Result()
	if err == nil && len(tags) > 0 {
		// Remove key from each tag set
		for _, tag := range tags {
			tagKey := fmt.Sprintf("cache:tag:%s", tag)
			pipe.SRem(ctx, tagKey, key)
		}
		// Delete the tags key
		pipe.Del(ctx, tagsKey)
	}

	// Delete the actual key
	pipe.Del(ctx, key)

	_, err = pipe.Exec(ctx)
	return err
}

// DeleteByTag removes all keys associated with the given tag.
func (r *RedisProvider) DeleteByTag(ctx context.Context, tag string) error {
	tagKey := fmt.Sprintf("cache:tag:%s", tag)

	// Get all keys associated with this tag
	keys, err := r.client.SMembers(ctx, tagKey).Result()
	if err != nil {
		return err
	}

	if len(keys) == 0 {
		return nil
	}

	pipe := r.client.Pipeline()

	// Delete all keys and their tag associations
	for _, key := range keys {
		pipe.Del(ctx, key)
		// Also delete the tags key for this cache key
		tagsKey := fmt.Sprintf("cache:tags:%s", key)
		pipe.Del(ctx, tagsKey)
	}

	// Delete the tag set itself
	pipe.Del(ctx, tagKey)

	_, err = pipe.Exec(ctx)
	return err
}

// DeleteByPattern removes all keys matching the pattern.
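The Redis provider gets the same semantics from native sets plus pipelining: the value is written with SET, each tag gains an SADD into cache:tag:<tag>, and a reverse index under cache:tags:<key> lets Delete clean up after itself. For readers who want to see which commands end up on the wire, here is a standalone sketch of that write path using go-redis directly; the address and key names are illustrative, not taken from the diff:

```go
package main

import (
	"context"
	"time"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed local instance

	ttl := 5 * time.Minute
	pipe := rdb.Pipeline()
	pipe.Set(ctx, "query:users:active", `[{"id":1}]`, ttl)         // the cached value
	pipe.SAdd(ctx, "cache:tag:table:users", "query:users:active")  // tag -> keys set
	pipe.Expire(ctx, "cache:tag:table:users", ttl+time.Hour)       // tag set outlives the entries
	pipe.SAdd(ctx, "cache:tags:query:users:active", "table:users") // key -> tags set
	pipe.Expire(ctx, "cache:tags:query:users:active", ttl)
	if _, err := pipe.Exec(ctx); err != nil {
		panic(err)
	}
}
```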
pkg/cache/query_cache_test.go (vendored, 151 changed lines; file deleted)
@@ -1,151 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
)
|
||||
|
||||
func TestBuildQueryCacheKey(t *testing.T) {
|
||||
filters := []common.FilterOption{
|
||||
{Column: "name", Operator: "eq", Value: "test"},
|
||||
{Column: "age", Operator: "gt", Value: 25},
|
||||
}
|
||||
sorts := []common.SortOption{
|
||||
{Column: "name", Direction: "asc"},
|
||||
}
|
||||
|
||||
// Generate cache key
|
||||
key1 := BuildQueryCacheKey("users", filters, sorts, "status = 'active'", "")
|
||||
|
||||
// Same parameters should generate same key
|
||||
key2 := BuildQueryCacheKey("users", filters, sorts, "status = 'active'", "")
|
||||
|
||||
if key1 != key2 {
|
||||
t.Errorf("Expected same cache keys for identical parameters, got %s and %s", key1, key2)
|
||||
}
|
||||
|
||||
// Different parameters should generate different key
|
||||
key3 := BuildQueryCacheKey("users", filters, sorts, "status = 'inactive'", "")
|
||||
|
||||
if key1 == key3 {
|
||||
t.Errorf("Expected different cache keys for different parameters, got %s and %s", key1, key3)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildExtendedQueryCacheKey(t *testing.T) {
|
||||
filters := []common.FilterOption{
|
||||
{Column: "name", Operator: "eq", Value: "test"},
|
||||
}
|
||||
sorts := []common.SortOption{
|
||||
{Column: "name", Direction: "asc"},
|
||||
}
|
||||
expandOpts := []interface{}{
|
||||
map[string]interface{}{
|
||||
"relation": "posts",
|
||||
"where": "status = 'published'",
|
||||
},
|
||||
}
|
||||
|
||||
// Generate cache key
|
||||
key1 := BuildExtendedQueryCacheKey("users", filters, sorts, "", "", expandOpts, false, "", "")
|
||||
|
||||
// Same parameters should generate same key
|
||||
key2 := BuildExtendedQueryCacheKey("users", filters, sorts, "", "", expandOpts, false, "", "")
|
||||
|
||||
if key1 != key2 {
|
||||
t.Errorf("Expected same cache keys for identical parameters")
|
||||
}
|
||||
|
||||
// Different distinct value should generate different key
|
||||
key3 := BuildExtendedQueryCacheKey("users", filters, sorts, "", "", expandOpts, true, "", "")
|
||||
|
||||
if key1 == key3 {
|
||||
t.Errorf("Expected different cache keys for different distinct values")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetQueryTotalCacheKey(t *testing.T) {
|
||||
hash := "abc123"
|
||||
key := GetQueryTotalCacheKey(hash)
|
||||
|
||||
expected := "query_total:abc123"
|
||||
if key != expected {
|
||||
t.Errorf("Expected %s, got %s", expected, key)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCachedTotalIntegration(t *testing.T) {
|
||||
// Initialize cache with memory provider for testing
|
||||
UseMemory(&Options{
|
||||
DefaultTTL: 1 * time.Minute,
|
||||
MaxSize: 100,
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create test data
|
||||
filters := []common.FilterOption{
|
||||
{Column: "status", Operator: "eq", Value: "active"},
|
||||
}
|
||||
sorts := []common.SortOption{
|
||||
{Column: "created_at", Direction: "desc"},
|
||||
}
|
||||
|
||||
// Build cache key
|
||||
cacheKeyHash := BuildQueryCacheKey("test_table", filters, sorts, "", "")
|
||||
cacheKey := GetQueryTotalCacheKey(cacheKeyHash)
|
||||
|
||||
// Store a total count in cache
|
||||
totalToCache := CachedTotal{Total: 42}
|
||||
err := GetDefaultCache().Set(ctx, cacheKey, totalToCache, time.Minute)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to set cache: %v", err)
|
||||
}
|
||||
|
||||
// Retrieve from cache
|
||||
var cachedTotal CachedTotal
|
||||
err = GetDefaultCache().Get(ctx, cacheKey, &cachedTotal)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get from cache: %v", err)
|
||||
}
|
||||
|
||||
if cachedTotal.Total != 42 {
|
||||
t.Errorf("Expected total 42, got %d", cachedTotal.Total)
|
||||
}
|
||||
|
||||
// Test cache miss
|
||||
nonExistentKey := GetQueryTotalCacheKey("nonexistent")
|
||||
var missedTotal CachedTotal
|
||||
err = GetDefaultCache().Get(ctx, nonExistentKey, &missedTotal)
|
||||
if err == nil {
|
||||
t.Errorf("Expected error for cache miss, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashString(t *testing.T) {
|
||||
input1 := "test string"
|
||||
input2 := "test string"
|
||||
input3 := "different string"
|
||||
|
||||
hash1 := hashString(input1)
|
||||
hash2 := hashString(input2)
|
||||
hash3 := hashString(input3)
|
||||
|
||||
// Same input should produce same hash
|
||||
if hash1 != hash2 {
|
||||
t.Errorf("Expected same hash for identical inputs")
|
||||
}
|
||||
|
||||
// Different input should produce different hash
|
||||
if hash1 == hash3 {
|
||||
t.Errorf("Expected different hash for different inputs")
|
||||
}
|
||||
|
||||
// Hash should be hex encoded SHA256 (64 characters)
|
||||
if len(hash1) != 64 {
|
||||
t.Errorf("Expected hash length of 64, got %d", len(hash1))
|
||||
}
|
||||
}
|
||||
@@ -208,21 +208,9 @@ func SanitizeWhereClause(where string, tableName string, options ...*RequestOpti
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if tableName != "" && !hasTablePrefix(condToCheck) {
|
||||
// If tableName is provided and the condition DOESN'T have a table prefix,
|
||||
// qualify unambiguous column references to prevent "ambiguous column" errors
|
||||
// when there are multiple joins on the same table (e.g., recursive preloads)
|
||||
columnName := extractUnqualifiedColumnName(condToCheck)
|
||||
if columnName != "" && (validColumns == nil || isValidColumn(columnName, validColumns)) {
|
||||
// Qualify the column with the table name
|
||||
// Be careful to only replace the column name, not other occurrences of the string
|
||||
oldRef := columnName
|
||||
newRef := tableName + "." + columnName
|
||||
// Use word boundary matching to avoid replacing partial matches
|
||||
cond = qualifyColumnInCondition(cond, oldRef, newRef)
|
||||
logger.Debug("Qualified unqualified column in condition: '%s' added table prefix '%s'", oldRef, tableName)
|
||||
}
|
||||
}
|
||||
// Note: We no longer add prefixes to unqualified columns here.
|
||||
// Use AddTablePrefixToColumns() separately if you need to add prefixes.
|
||||
|
||||
validConditions = append(validConditions, cond)
|
||||
}
|
||||
@@ -633,3 +621,145 @@ func isValidColumn(columnName string, validColumns map[string]bool) bool {
|
||||
}
|
||||
return validColumns[strings.ToLower(columnName)]
|
||||
}
|
||||
|
||||
// AddTablePrefixToColumns adds table prefix to unqualified column references in a WHERE clause.
|
||||
// This function only prefixes simple column references and skips:
|
||||
// - Columns already having a table prefix (containing a dot)
|
||||
// - Columns inside function calls or expressions (inside parentheses)
|
||||
// - Columns inside subqueries
|
||||
// - Columns that don't exist in the table (validation via model registry)
|
||||
//
|
||||
// Examples:
|
||||
// - "status = 'active'" -> "users.status = 'active'" (if status exists in users table)
|
||||
// - "COALESCE(status, 'default') = 'active'" -> unchanged (status inside function)
|
||||
// - "users.status = 'active'" -> unchanged (already has prefix)
|
||||
// - "(status = 'active')" -> "(users.status = 'active')" (grouping parens are OK)
|
||||
// - "invalid_col = 'value'" -> unchanged (if invalid_col doesn't exist in table)
|
||||
//
|
||||
// Parameters:
|
||||
// - where: The WHERE clause to process
|
||||
// - tableName: The table name to use as prefix
|
||||
//
|
||||
// Returns:
|
||||
// - The WHERE clause with table prefixes added to appropriate and valid columns
|
||||
func AddTablePrefixToColumns(where string, tableName string) string {
|
||||
if where == "" || tableName == "" {
|
||||
return where
|
||||
}
|
||||
|
||||
where = strings.TrimSpace(where)
|
||||
|
||||
// Get valid columns from the model registry for validation
|
||||
validColumns := getValidColumnsForTable(tableName)
|
||||
|
||||
// Split by AND to handle multiple conditions (parenthesis-aware)
|
||||
conditions := splitByAND(where)
|
||||
prefixedConditions := make([]string, 0, len(conditions))
|
||||
|
||||
for _, cond := range conditions {
|
||||
cond = strings.TrimSpace(cond)
|
||||
if cond == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Process this condition to add table prefix if appropriate
|
||||
processedCond := addPrefixToSingleCondition(cond, tableName, validColumns)
|
||||
prefixedConditions = append(prefixedConditions, processedCond)
|
||||
}
|
||||
|
||||
if len(prefixedConditions) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
return strings.Join(prefixedConditions, " AND ")
|
||||
}
|
||||
|
||||
// addPrefixToSingleCondition adds table prefix to a single condition if appropriate
|
||||
// Returns the condition unchanged if:
|
||||
// - The condition is a SQL literal/expression (true, false, null, 1=1, etc.)
|
||||
// - The column reference is inside a function call
|
||||
// - The column already has a table prefix
|
||||
// - No valid column reference is found
|
||||
// - The column doesn't exist in the table (when validColumns is provided)
|
||||
func addPrefixToSingleCondition(cond string, tableName string, validColumns map[string]bool) string {
|
||||
// Strip outer grouping parentheses to get to the actual condition
|
||||
strippedCond := stripOuterParentheses(cond)
|
||||
|
||||
// Skip SQL literals and trivial conditions (true, false, null, 1=1, etc.)
|
||||
if IsSQLExpression(strippedCond) || IsTrivialCondition(strippedCond) {
|
||||
logger.Debug("Skipping SQL literal/trivial condition: '%s'", strippedCond)
|
||||
return cond
|
||||
}
|
||||
|
||||
// Extract the left side of the comparison (before the operator)
|
||||
columnRef := extractLeftSideOfComparison(strippedCond)
|
||||
if columnRef == "" {
|
||||
return cond
|
||||
}
|
||||
|
||||
// Skip if it already has a prefix (contains a dot)
|
||||
if strings.Contains(columnRef, ".") {
|
||||
logger.Debug("Skipping column '%s' - already has table prefix", columnRef)
|
||||
return cond
|
||||
}
|
||||
|
||||
// Skip if it's a function call or expression (contains parentheses)
|
||||
if strings.Contains(columnRef, "(") {
|
||||
logger.Debug("Skipping column reference '%s' - inside function or expression", columnRef)
|
||||
return cond
|
||||
}
|
||||
|
||||
// Validate that the column exists in the table (if we have column info)
|
||||
if !isValidColumn(columnRef, validColumns) {
|
||||
logger.Debug("Skipping column '%s' - not found in table '%s'", columnRef, tableName)
|
||||
return cond
|
||||
}
|
||||
|
||||
// It's a simple unqualified column reference that exists in the table - add the table prefix
|
||||
newRef := tableName + "." + columnRef
|
||||
result := qualifyColumnInCondition(cond, columnRef, newRef)
|
||||
logger.Debug("Added table prefix to column: '%s' -> '%s'", columnRef, newRef)
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// extractLeftSideOfComparison extracts the left side of a comparison operator from a condition.
|
||||
// This is used to identify the column reference that may need a table prefix.
|
||||
//
|
||||
// Examples:
|
||||
// - "status = 'active'" returns "status"
|
||||
// - "COALESCE(status, 'default') = 'active'" returns "COALESCE(status, 'default')"
|
||||
// - "priority > 5" returns "priority"
|
||||
//
|
||||
// Returns empty string if no operator is found.
|
||||
func extractLeftSideOfComparison(cond string) string {
|
||||
operators := []string{" = ", " != ", " <> ", " > ", " >= ", " < ", " <= ", " LIKE ", " like ", " IN ", " in ", " IS ", " is ", " NOT ", " not "}
|
||||
|
||||
// Find the first operator outside of parentheses and quotes
|
||||
minIdx := -1
|
||||
for _, op := range operators {
|
||||
idx := findOperatorOutsideParentheses(cond, op)
|
||||
if idx > 0 && (minIdx == -1 || idx < minIdx) {
|
||||
minIdx = idx
|
||||
}
|
||||
}
|
||||
|
||||
if minIdx > 0 {
|
||||
leftSide := strings.TrimSpace(cond[:minIdx])
|
||||
// Remove any surrounding quotes
|
||||
leftSide = strings.Trim(leftSide, "`\"'")
|
||||
return leftSide
|
||||
}
|
||||
|
||||
// No operator found - might be a boolean column
|
||||
parts := strings.Fields(cond)
|
||||
if len(parts) > 0 {
|
||||
columnRef := strings.Trim(parts[0], "`\"'")
|
||||
// Make sure it's not a SQL keyword
|
||||
if !IsSQLKeyword(strings.ToLower(columnRef)) {
|
||||
return columnRef
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -75,6 +75,25 @@ func CloseErrorTracking() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractContext attempts to find a context.Context in the given arguments.
|
||||
// It returns the found context (or context.Background() if not found) and
|
||||
// the remaining arguments without the context.
|
||||
func extractContext(args ...interface{}) (context.Context, []interface{}) {
|
||||
ctx := context.Background()
|
||||
var newArgs []interface{}
|
||||
found := false
|
||||
|
||||
for _, arg := range args {
|
||||
if c, ok := arg.(context.Context); ok && !found {
|
||||
ctx = c
|
||||
found = true
|
||||
} else {
|
||||
newArgs = append(newArgs, arg)
|
||||
}
|
||||
}
|
||||
return ctx, newArgs
|
||||
}
|
||||
|
||||
func Info(template string, args ...interface{}) {
|
||||
if Logger == nil {
|
||||
log.Printf(template, args...)
|
||||
@@ -84,7 +103,8 @@ func Info(template string, args ...interface{}) {
|
||||
}
|
||||
|
||||
func Warn(template string, args ...interface{}) {
|
||||
message := fmt.Sprintf(template, args...)
|
||||
ctx, remainingArgs := extractContext(args...)
|
||||
message := fmt.Sprintf(template, remainingArgs...)
|
||||
if Logger == nil {
|
||||
log.Printf("%s", message)
|
||||
} else {
|
||||
@@ -93,14 +113,15 @@ func Warn(template string, args ...interface{}) {
|
||||
|
||||
// Send to error tracker
|
||||
if errorTracker != nil {
|
||||
errorTracker.CaptureMessage(context.Background(), message, errortracking.SeverityWarning, map[string]interface{}{
|
||||
errorTracker.CaptureMessage(ctx, message, errortracking.SeverityWarning, map[string]interface{}{
|
||||
"process_id": os.Getpid(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Error(template string, args ...interface{}) {
|
||||
message := fmt.Sprintf(template, args...)
|
||||
ctx, remainingArgs := extractContext(args...)
|
||||
message := fmt.Sprintf(template, remainingArgs...)
|
||||
if Logger == nil {
|
||||
log.Printf("%s", message)
|
||||
} else {
|
||||
@@ -109,7 +130,7 @@ func Error(template string, args ...interface{}) {
|
||||
|
||||
// Send to error tracker
|
||||
if errorTracker != nil {
|
||||
errorTracker.CaptureMessage(context.Background(), message, errortracking.SeverityError, map[string]interface{}{
|
||||
errorTracker.CaptureMessage(ctx, message, errortracking.SeverityError, map[string]interface{}{
|
||||
"process_id": os.Getpid(),
|
||||
})
|
||||
}
|
||||
@@ -124,12 +145,13 @@ func Debug(template string, args ...interface{}) {
|
||||
}
|
||||
|
||||
// CatchPanic - Handle panic
|
||||
func CatchPanicCallback(location string, cb func(err any)) {
|
||||
func CatchPanicCallback(location string, cb func(err any), args ...interface{}) {
|
||||
ctx, _ := extractContext(args...)
|
||||
if err := recover(); err != nil {
|
||||
callstack := debug.Stack()
|
||||
|
||||
if Logger != nil {
|
||||
Error("Panic in %s : %v", location, err)
|
||||
Error("Panic in %s : %v", location, err, ctx) // Pass context implicitly
|
||||
} else {
|
||||
fmt.Printf("%s:PANIC->%+v", location, err)
|
||||
debug.PrintStack()
|
||||
@@ -137,7 +159,7 @@ func CatchPanicCallback(location string, cb func(err any)) {
|
||||
|
||||
// Send to error tracker
|
||||
if errorTracker != nil {
|
||||
errorTracker.CapturePanic(context.Background(), err, callstack, map[string]interface{}{
|
||||
errorTracker.CapturePanic(ctx, err, callstack, map[string]interface{}{
|
||||
"location": location,
|
||||
"process_id": os.Getpid(),
|
||||
})
|
||||
@@ -150,8 +172,8 @@ func CatchPanicCallback(location string, cb func(err any)) {
|
||||
}
|
||||
|
||||
// CatchPanic - Handle panic
|
||||
func CatchPanic(location string) {
|
||||
CatchPanicCallback(location, nil)
|
||||
func CatchPanic(location string, args ...interface{}) {
|
||||
CatchPanicCallback(location, nil, args...)
|
||||
}
|
||||
|
||||
// HandlePanic logs a panic and returns it as an error
|
||||
@@ -163,13 +185,14 @@ func CatchPanic(location string) {
|
||||
// err = logger.HandlePanic("MethodName", r)
|
||||
// }
|
||||
// }()
|
||||
func HandlePanic(methodName string, r any) error {
|
||||
func HandlePanic(methodName string, r any, args ...interface{}) error {
|
||||
ctx, _ := extractContext(args...)
|
||||
stack := debug.Stack()
|
||||
Error("Panic in %s: %v\nStack trace:\n%s", methodName, r, string(stack))
|
||||
Error("Panic in %s: %v\nStack trace:\n%s", methodName, r, string(stack), ctx) // Pass context implicitly
|
||||
|
||||
// Send to error tracker
|
||||
if errorTracker != nil {
|
||||
errorTracker.CapturePanic(context.Background(), r, stack, map[string]interface{}{
|
||||
errorTracker.CapturePanic(ctx, r, stack, map[string]interface{}{
|
||||
"method": methodName,
|
||||
"process_id": os.Getpid(),
|
||||
})
|
||||
|
||||
@@ -39,6 +39,9 @@ type Provider interface {
|
||||
// UpdateEventQueueSize updates the event queue size metric
|
||||
UpdateEventQueueSize(size int64)
|
||||
|
||||
// RecordPanic records a panic event
|
||||
RecordPanic(methodName string)
|
||||
|
||||
// Handler returns an HTTP handler for exposing metrics (e.g., /metrics endpoint)
|
||||
Handler() http.Handler
|
||||
}
|
||||
@@ -75,6 +78,7 @@ func (n *NoOpProvider) RecordEventPublished(source, eventType string) {}
|
||||
func (n *NoOpProvider) RecordEventProcessed(source, eventType, status string, duration time.Duration) {
|
||||
}
|
||||
func (n *NoOpProvider) UpdateEventQueueSize(size int64) {}
|
||||
func (n *NoOpProvider) RecordPanic(methodName string) {}
|
||||
func (n *NoOpProvider) Handler() http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
|
||||
@@ -20,6 +20,7 @@ type PrometheusProvider struct {
|
||||
cacheHits *prometheus.CounterVec
|
||||
cacheMisses *prometheus.CounterVec
|
||||
cacheSize *prometheus.GaugeVec
|
||||
panicsTotal *prometheus.CounterVec
|
||||
}
|
||||
|
||||
// NewPrometheusProvider creates a new Prometheus metrics provider
|
||||
@@ -83,6 +84,13 @@ func NewPrometheusProvider() *PrometheusProvider {
|
||||
},
|
||||
[]string{"provider"},
|
||||
),
|
||||
panicsTotal: promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "panics_total",
|
||||
Help: "Total number of panics",
|
||||
},
|
||||
[]string{"method"},
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -145,6 +153,11 @@ func (p *PrometheusProvider) UpdateCacheSize(provider string, size int64) {
|
||||
p.cacheSize.WithLabelValues(provider).Set(float64(size))
|
||||
}
|
||||
|
||||
// RecordPanic implements the Provider interface
|
||||
func (p *PrometheusProvider) RecordPanic(methodName string) {
|
||||
p.panicsTotal.WithLabelValues(methodName).Inc()
|
||||
}
|
||||
|
||||
// Handler implements Provider interface
|
||||
func (p *PrometheusProvider) Handler() http.Handler {
|
||||
return promhttp.Handler()
|
||||
|
||||
33
pkg/middleware/panic.go
Normal file
33
pkg/middleware/panic.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/metrics"
|
||||
)
|
||||
|
||||
const panicMiddlewareMethodName = "PanicMiddleware"
|
||||
|
||||
// PanicRecovery is a middleware that recovers from panics, logs the error,
|
||||
// sends it to an error tracker, records a metric, and returns a 500 Internal Server Error.
|
||||
func PanicRecovery(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
defer func() {
|
||||
if rcv := recover(); rcv != nil {
|
||||
// Record the panic metric
|
||||
metrics.GetProvider().RecordPanic(panicMiddlewareMethodName)
|
||||
|
||||
// Log the panic and send to error tracker
|
||||
// We pass the request context so the error tracker can potentially
|
||||
// link the panic to the request trace.
|
||||
ctx := r.Context()
|
||||
err := logger.HandlePanic(panicMiddlewareMethodName, rcv, ctx)
|
||||
|
||||
// Respond with a 500 error
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
}()
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
86
pkg/middleware/panic_test.go
Normal file
86
pkg/middleware/panic_test.go
Normal file
@@ -0,0 +1,86 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/metrics"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// mockMetricsProvider is a mock for the metrics provider to check if methods are called.
|
||||
type mockMetricsProvider struct {
|
||||
metrics.NoOpProvider // Embed NoOpProvider to avoid implementing all methods
|
||||
panicRecorded bool
|
||||
methodName string
|
||||
}
|
||||
|
||||
func (m *mockMetricsProvider) RecordPanic(methodName string) {
|
||||
m.panicRecorded = true
|
||||
m.methodName = methodName
|
||||
}
|
||||
|
||||
func TestPanicRecovery(t *testing.T) {
|
||||
// Initialize a mock logger to avoid actual logging output during tests
|
||||
logger.Init(true)
|
||||
|
||||
// Setup mock metrics provider
|
||||
mockProvider := &mockMetricsProvider{}
|
||||
originalProvider := metrics.GetProvider()
|
||||
metrics.SetProvider(mockProvider)
|
||||
defer metrics.SetProvider(originalProvider) // Restore original provider after test
|
||||
|
||||
// 1. Test case: A handler that panics
|
||||
t.Run("recovers from panic and returns 500", func(t *testing.T) {
|
||||
// Reset mock state for this sub-test
|
||||
mockProvider.panicRecorded = false
|
||||
mockProvider.methodName = ""
|
||||
|
||||
panicHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
panic("something went terribly wrong")
|
||||
})
|
||||
|
||||
// Create the middleware wrapping the panicking handler
|
||||
testHandler := PanicRecovery(panicHandler)
|
||||
|
||||
// Create a test request and response recorder
|
||||
req := httptest.NewRequest("GET", "http://example.com/foo", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
// Serve the request
|
||||
testHandler.ServeHTTP(rr, req)
|
||||
|
||||
// Assertions
|
||||
assert.Equal(t, http.StatusInternalServerError, rr.Code, "expected status code to be 500")
|
||||
assert.Contains(t, rr.Body.String(), "panic in PanicMiddleware: something went terribly wrong", "expected error message in response body")
|
||||
|
||||
// Assert that the metric was recorded
|
||||
assert.True(t, mockProvider.panicRecorded, "expected RecordPanic to be called on metrics provider")
|
||||
assert.Equal(t, panicMiddlewareMethodName, mockProvider.methodName, "expected panic to be recorded with the correct method name")
|
||||
})
|
||||
|
||||
// 2. Test case: A handler that does NOT panic
|
||||
t.Run("does not interfere with a non-panicking handler", func(t *testing.T) {
|
||||
// Reset mock state for this sub-test
|
||||
mockProvider.panicRecorded = false
|
||||
|
||||
successHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte("OK"))
|
||||
})
|
||||
|
||||
testHandler := PanicRecovery(successHandler)
|
||||
|
||||
req := httptest.NewRequest("GET", "http://example.com/foo", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
testHandler.ServeHTTP(rr, req)
|
||||
|
||||
// Assertions
|
||||
assert.Equal(t, http.StatusOK, rr.Code, "expected status code to be 200")
|
||||
assert.Equal(t, "OK", rr.Body.String(), "expected 'OK' response body")
|
||||
assert.False(t, mockProvider.panicRecorded, "expected RecordPanic to not be called when there is no panic")
|
||||
})
|
||||
}
|
||||
@@ -273,25 +273,151 @@ handler.SetOpenAPIGenerator(func() (string, error) {
|
||||
})
|
||||
```
|
||||
|
||||
## Using with Swagger UI
|
||||
## Using the Built-in UI Handler
|
||||
|
||||
You can serve the generated OpenAPI spec with Swagger UI:
|
||||
The package includes a built-in UI handler that serves popular OpenAPI visualization tools. No need to download or manage static files - everything is served from CDN.
|
||||
|
||||
### Quick Start
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/bitechdev/ResolveSpec/pkg/openapi"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func main() {
|
||||
router := mux.NewRouter()
|
||||
|
||||
// Setup your API routes and OpenAPI generator...
|
||||
// (see examples above)
|
||||
|
||||
// Add the UI handler - defaults to Swagger UI
|
||||
openapi.SetupUIRoute(router, "/docs", openapi.UIConfig{
|
||||
UIType: openapi.SwaggerUI,
|
||||
SpecURL: "/openapi",
|
||||
Title: "My API Documentation",
|
||||
})
|
||||
|
||||
// Now visit http://localhost:8080/docs
|
||||
http.ListenAndServe(":8080", router)
|
||||
}
|
||||
```
|
||||
|
||||
### Supported UI Frameworks
|
||||
|
||||
The handler supports four popular OpenAPI UI frameworks:
|
||||
|
||||
#### 1. Swagger UI (Default)
|
||||
The most widely used OpenAPI UI with excellent compatibility and features.
|
||||
|
||||
```go
|
||||
openapi.SetupUIRoute(router, "/docs", openapi.UIConfig{
|
||||
UIType: openapi.SwaggerUI,
|
||||
Theme: "dark", // optional: "light" or "dark"
|
||||
})
|
||||
```
|
||||
|
||||
#### 2. RapiDoc
|
||||
Modern, customizable, and feature-rich OpenAPI UI.
|
||||
|
||||
```go
|
||||
openapi.SetupUIRoute(router, "/docs", openapi.UIConfig{
|
||||
UIType: openapi.RapiDoc,
|
||||
Theme: "dark",
|
||||
})
|
||||
```
|
||||
|
||||
#### 3. Redoc
|
||||
Clean, responsive documentation with great UX.
|
||||
|
||||
```go
|
||||
openapi.SetupUIRoute(router, "/docs", openapi.UIConfig{
|
||||
UIType: openapi.Redoc,
|
||||
})
|
||||
```
|
||||
|
||||
#### 4. Scalar
|
||||
Modern and sleek OpenAPI documentation.
|
||||
|
||||
```go
|
||||
openapi.SetupUIRoute(router, "/docs", openapi.UIConfig{
|
||||
UIType: openapi.Scalar,
|
||||
Theme: "dark",
|
||||
})
|
||||
```
|
||||
|
||||
### Configuration Options
|
||||
|
||||
```go
|
||||
type UIConfig struct {
|
||||
UIType UIType // SwaggerUI, RapiDoc, Redoc, or Scalar
|
||||
SpecURL string // URL to OpenAPI spec (default: "/openapi")
|
||||
Title string // Page title (default: "API Documentation")
|
||||
FaviconURL string // Custom favicon URL (optional)
|
||||
CustomCSS string // Custom CSS to inject (optional)
|
||||
Theme string // "light" or "dark" (support varies by UI)
|
||||
}
|
||||
```
|
||||
|
||||
### Custom Styling Example
|
||||
|
||||
```go
|
||||
openapi.SetupUIRoute(router, "/docs", openapi.UIConfig{
|
||||
UIType: openapi.SwaggerUI,
|
||||
Title: "Acme Corp API",
|
||||
CustomCSS: `
|
||||
.swagger-ui .topbar {
|
||||
background-color: #1976d2;
|
||||
}
|
||||
.swagger-ui .info .title {
|
||||
color: #1976d2;
|
||||
}
|
||||
`,
|
||||
})
|
||||
```
|
||||
|
||||
### Using Multiple UIs
|
||||
|
||||
You can serve different UIs at different paths:
|
||||
|
||||
```go
|
||||
// Swagger UI at /docs
|
||||
openapi.SetupUIRoute(router, "/docs", openapi.UIConfig{
|
||||
UIType: openapi.SwaggerUI,
|
||||
})
|
||||
|
||||
// Redoc at /redoc
|
||||
openapi.SetupUIRoute(router, "/redoc", openapi.UIConfig{
|
||||
UIType: openapi.Redoc,
|
||||
})
|
||||
|
||||
// RapiDoc at /api-docs
|
||||
openapi.SetupUIRoute(router, "/api-docs", openapi.UIConfig{
|
||||
UIType: openapi.RapiDoc,
|
||||
})
|
||||
```
|
||||
|
||||
### Manual Handler Usage
|
||||
|
||||
If you need more control, use the handler directly:
|
||||
|
||||
```go
|
||||
handler := openapi.UIHandler(openapi.UIConfig{
|
||||
UIType: openapi.SwaggerUI,
|
||||
SpecURL: "/api/openapi.json",
|
||||
})
|
||||
|
||||
router.Handle("/documentation", handler)
|
||||
```
|
||||
|
||||
## Using with External Swagger UI
|
||||
|
||||
Alternatively, you can use an external Swagger UI instance:
|
||||
|
||||
1. Get the spec from `/openapi`
|
||||
2. Load it in Swagger UI at `https://petstore.swagger.io/`
|
||||
3. Or self-host Swagger UI and point it to your `/openapi` endpoint
|
||||
|
||||
Example with self-hosted Swagger UI:
|
||||
|
||||
```go
|
||||
// Serve Swagger UI static files
|
||||
router.PathPrefix("/swagger/").Handler(
|
||||
http.StripPrefix("/swagger/", http.FileServer(http.Dir("./swagger-ui"))),
|
||||
)
|
||||
|
||||
// Configure Swagger UI to use /openapi
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
You can test the OpenAPI endpoint:
|
||||
|
||||
@@ -183,6 +183,69 @@ func ExampleWithFuncSpec() {
|
||||
_ = generatorFunc
|
||||
}
|
||||
|
||||
// ExampleWithUIHandler shows how to serve OpenAPI documentation with a web UI
|
||||
func ExampleWithUIHandler(db *gorm.DB) {
|
||||
// Create handler and configure OpenAPI generator
|
||||
handler := restheadspec.NewHandlerWithGORM(db)
|
||||
registry := modelregistry.NewModelRegistry()
|
||||
|
||||
handler.SetOpenAPIGenerator(func() (string, error) {
|
||||
generator := NewGenerator(GeneratorConfig{
|
||||
Title: "My API",
|
||||
Description: "API documentation with interactive UI",
|
||||
Version: "1.0.0",
|
||||
BaseURL: "http://localhost:8080",
|
||||
Registry: registry,
|
||||
IncludeRestheadSpec: true,
|
||||
})
|
||||
return generator.GenerateJSON()
|
||||
})
|
||||
|
||||
// Setup routes
|
||||
router := mux.NewRouter()
|
||||
restheadspec.SetupMuxRoutes(router, handler, nil)
|
||||
|
||||
// Add UI handlers for different frameworks
|
||||
// Swagger UI at /docs (most popular)
|
||||
SetupUIRoute(router, "/docs", UIConfig{
|
||||
UIType: SwaggerUI,
|
||||
SpecURL: "/openapi",
|
||||
Title: "My API - Swagger UI",
|
||||
Theme: "light",
|
||||
})
|
||||
|
||||
// RapiDoc at /rapidoc (modern alternative)
|
||||
SetupUIRoute(router, "/rapidoc", UIConfig{
|
||||
UIType: RapiDoc,
|
||||
SpecURL: "/openapi",
|
||||
Title: "My API - RapiDoc",
|
||||
})
|
||||
|
||||
// Redoc at /redoc (clean and responsive)
|
||||
SetupUIRoute(router, "/redoc", UIConfig{
|
||||
UIType: Redoc,
|
||||
SpecURL: "/openapi",
|
||||
Title: "My API - Redoc",
|
||||
})
|
||||
|
||||
// Scalar at /scalar (modern and sleek)
|
||||
SetupUIRoute(router, "/scalar", UIConfig{
|
||||
UIType: Scalar,
|
||||
SpecURL: "/openapi",
|
||||
Title: "My API - Scalar",
|
||||
Theme: "dark",
|
||||
})
|
||||
|
||||
// Now you can access:
|
||||
// http://localhost:8080/docs - Swagger UI
|
||||
// http://localhost:8080/rapidoc - RapiDoc
|
||||
// http://localhost:8080/redoc - Redoc
|
||||
// http://localhost:8080/scalar - Scalar
|
||||
// http://localhost:8080/openapi - Raw OpenAPI JSON
|
||||
|
||||
_ = router
|
||||
}
|
||||
|
||||
// ExampleCustomization shows advanced customization options
|
||||
func ExampleCustomization() {
|
||||
// Create registry and register models with descriptions using struct tags
|
||||
|
||||
294
pkg/openapi/ui_handler.go
Normal file
294
pkg/openapi/ui_handler.go
Normal file
@@ -0,0 +1,294 @@
|
||||
package openapi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"html/template"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
// UIType represents the type of OpenAPI UI to serve
|
||||
type UIType string
|
||||
|
||||
const (
|
||||
// SwaggerUI is the most popular OpenAPI UI
|
||||
SwaggerUI UIType = "swagger-ui"
|
||||
// RapiDoc is a modern, customizable OpenAPI UI
|
||||
RapiDoc UIType = "rapidoc"
|
||||
// Redoc is a clean, responsive OpenAPI UI
|
||||
Redoc UIType = "redoc"
|
||||
// Scalar is a modern and sleek OpenAPI UI
|
||||
Scalar UIType = "scalar"
|
||||
)
|
||||
|
||||
// UIConfig holds configuration for the OpenAPI UI handler
|
||||
type UIConfig struct {
|
||||
// UIType specifies which UI framework to use (default: SwaggerUI)
|
||||
UIType UIType
|
||||
// SpecURL is the URL to the OpenAPI spec JSON (default: "/openapi")
|
||||
SpecURL string
|
||||
// Title is the page title (default: "API Documentation")
|
||||
Title string
|
||||
// FaviconURL is the URL to the favicon (optional)
|
||||
FaviconURL string
|
||||
// CustomCSS allows injecting custom CSS (optional)
|
||||
CustomCSS string
|
||||
// Theme for the UI (light/dark, depends on UI type)
|
||||
Theme string
|
||||
}
|
||||
|
||||
// UIHandler creates an HTTP handler that serves an OpenAPI UI
|
||||
func UIHandler(config UIConfig) http.HandlerFunc {
|
||||
// Set defaults
|
||||
if config.UIType == "" {
|
||||
config.UIType = SwaggerUI
|
||||
}
|
||||
if config.SpecURL == "" {
|
||||
config.SpecURL = "/openapi"
|
||||
}
|
||||
if config.Title == "" {
|
||||
config.Title = "API Documentation"
|
||||
}
|
||||
if config.Theme == "" {
|
||||
config.Theme = "light"
|
||||
}
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
var htmlContent string
|
||||
var err error
|
||||
|
||||
switch config.UIType {
|
||||
case SwaggerUI:
|
||||
htmlContent, err = generateSwaggerUI(config)
|
||||
case RapiDoc:
|
||||
htmlContent, err = generateRapiDoc(config)
|
||||
case Redoc:
|
||||
htmlContent, err = generateRedoc(config)
|
||||
case Scalar:
|
||||
htmlContent, err = generateScalar(config)
|
||||
default:
|
||||
http.Error(w, "Unsupported UI type", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("Failed to generate UI: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, err = w.Write([]byte(htmlContent))
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("Failed to write response: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// templateData wraps UIConfig to properly handle CSS in templates
|
||||
type templateData struct {
|
||||
UIConfig
|
||||
SafeCustomCSS template.CSS
|
||||
}
|
||||
|
||||
// generateSwaggerUI generates the HTML for Swagger UI
|
||||
func generateSwaggerUI(config UIConfig) (string, error) {
|
||||
tmpl := `<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>{{.Title}}</title>
|
||||
{{if .FaviconURL}}<link rel="icon" type="image/png" href="{{.FaviconURL}}">{{end}}
|
||||
<link rel="stylesheet" type="text/css" href="https://cdn.jsdelivr.net/npm/swagger-ui-dist@5/swagger-ui.css">
|
||||
{{if .SafeCustomCSS}}<style>{{.SafeCustomCSS}}</style>{{end}}
|
||||
<style>
|
||||
html { box-sizing: border-box; overflow: -moz-scrollbars-vertical; overflow-y: scroll; }
|
||||
*, *:before, *:after { box-sizing: inherit; }
|
||||
body { margin: 0; padding: 0; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div id="swagger-ui"></div>
|
||||
<script src="https://cdn.jsdelivr.net/npm/swagger-ui-dist@5/swagger-ui-bundle.js"></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/swagger-ui-dist@5/swagger-ui-standalone-preset.js"></script>
|
||||
<script>
|
||||
window.onload = function() {
|
||||
const ui = SwaggerUIBundle({
|
||||
url: "{{.SpecURL}}",
|
||||
dom_id: '#swagger-ui',
|
||||
deepLinking: true,
|
||||
presets: [
|
||||
SwaggerUIBundle.presets.apis,
|
||||
SwaggerUIStandalonePreset
|
||||
],
|
||||
plugins: [
|
||||
SwaggerUIBundle.plugins.DownloadUrl
|
||||
],
|
||||
layout: "StandaloneLayout",
|
||||
{{if eq .Theme "dark"}}
|
||||
syntaxHighlight: {
|
||||
activate: true,
|
||||
theme: "monokai"
|
||||
}
|
||||
{{end}}
|
||||
});
|
||||
window.ui = ui;
|
||||
};
|
||||
</script>
|
||||
</body>
|
||||
</html>`
|
||||
|
||||
t, err := template.New("swagger").Parse(tmpl)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
data := templateData{
|
||||
UIConfig: config,
|
||||
SafeCustomCSS: template.CSS(config.CustomCSS),
|
||||
}
|
||||
|
||||
var buf strings.Builder
|
||||
if err := t.Execute(&buf, data); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// generateRapiDoc generates the HTML for RapiDoc
|
||||
func generateRapiDoc(config UIConfig) (string, error) {
|
||||
theme := "light"
|
||||
if config.Theme == "dark" {
|
||||
theme = "dark"
|
||||
}
|
||||
|
||||
tmpl := `<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>{{.Title}}</title>
|
||||
{{if .FaviconURL}}<link rel="icon" type="image/png" href="{{.FaviconURL}}">{{end}}
|
||||
<script type="module" src="https://unpkg.com/rapidoc/dist/rapidoc-min.js"></script>
|
||||
{{if .SafeCustomCSS}}<style>{{.SafeCustomCSS}}</style>{{end}}
|
||||
</head>
|
||||
<body>
|
||||
<rapi-doc
|
||||
spec-url="{{.SpecURL}}"
|
||||
theme="` + theme + `"
|
||||
render-style="read"
|
||||
show-header="true"
|
||||
show-info="true"
|
||||
allow-try="true"
|
||||
allow-server-selection="true"
|
||||
allow-authentication="true"
|
||||
api-key-name="Authorization"
|
||||
api-key-location="header"
|
||||
></rapi-doc>
|
||||
</body>
|
||||
</html>`
|
||||
|
||||
t, err := template.New("rapidoc").Parse(tmpl)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
data := templateData{
|
||||
UIConfig: config,
|
||||
SafeCustomCSS: template.CSS(config.CustomCSS),
|
||||
}
|
||||
|
||||
var buf strings.Builder
|
||||
if err := t.Execute(&buf, data); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// generateRedoc generates the HTML for Redoc
|
||||
func generateRedoc(config UIConfig) (string, error) {
|
||||
tmpl := `<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>{{.Title}}</title>
|
||||
{{if .FaviconURL}}<link rel="icon" type="image/png" href="{{.FaviconURL}}">{{end}}
|
||||
{{if .SafeCustomCSS}}<style>{{.SafeCustomCSS}}</style>{{end}}
|
||||
<style>
|
||||
body { margin: 0; padding: 0; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<redoc spec-url="{{.SpecURL}}" {{if eq .Theme "dark"}}theme='{"colors": {"primary": {"main": "#dd5522"}}}'{{end}}></redoc>
|
||||
<script src="https://cdn.redoc.ly/redoc/latest/bundles/redoc.standalone.js"></script>
|
||||
</body>
|
||||
</html>`
|
||||
|
||||
t, err := template.New("redoc").Parse(tmpl)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
data := templateData{
|
||||
UIConfig: config,
|
||||
SafeCustomCSS: template.CSS(config.CustomCSS),
|
||||
}
|
||||
|
||||
var buf strings.Builder
|
||||
if err := t.Execute(&buf, data); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// generateScalar generates the HTML for Scalar
|
||||
func generateScalar(config UIConfig) (string, error) {
|
||||
tmpl := `<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>{{.Title}}</title>
|
||||
{{if .FaviconURL}}<link rel="icon" type="image/png" href="{{.FaviconURL}}">{{end}}
|
||||
{{if .SafeCustomCSS}}<style>{{.SafeCustomCSS}}</style>{{end}}
|
||||
<style>
|
||||
body { margin: 0; padding: 0; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<script id="api-reference" data-url="{{.SpecURL}}" {{if eq .Theme "dark"}}data-theme="dark"{{end}}></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/@scalar/api-reference"></script>
|
||||
</body>
|
||||
</html>`
|
||||
|
||||
t, err := template.New("scalar").Parse(tmpl)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
data := templateData{
|
||||
UIConfig: config,
|
||||
SafeCustomCSS: template.CSS(config.CustomCSS),
|
||||
}
|
||||
|
||||
var buf strings.Builder
|
||||
if err := t.Execute(&buf, data); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// SetupUIRoute adds the OpenAPI UI route to a mux router
|
||||
// This is a convenience function for the most common use case
|
||||
func SetupUIRoute(router *mux.Router, path string, config UIConfig) {
|
||||
router.Handle(path, UIHandler(config))
|
||||
}
|
||||
308
pkg/openapi/ui_handler_test.go
Normal file
308
pkg/openapi/ui_handler_test.go
Normal file
@@ -0,0 +1,308 @@
|
||||
package openapi
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func TestUIHandler_SwaggerUI(t *testing.T) {
|
||||
config := UIConfig{
|
||||
UIType: SwaggerUI,
|
||||
SpecURL: "/openapi",
|
||||
Title: "Test API Docs",
|
||||
}
|
||||
|
||||
handler := UIHandler(config)
|
||||
req := httptest.NewRequest("GET", "/docs", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
handler(w, req)
|
||||
|
||||
resp := w.Result()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Errorf("Expected status 200, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
body := w.Body.String()
|
||||
|
||||
// Check for Swagger UI specific content
|
||||
if !strings.Contains(body, "swagger-ui") {
|
||||
t.Error("Expected Swagger UI content")
|
||||
}
|
||||
if !strings.Contains(body, "SwaggerUIBundle") {
|
||||
t.Error("Expected SwaggerUIBundle script")
|
||||
}
|
||||
if !strings.Contains(body, config.Title) {
|
||||
t.Errorf("Expected title '%s' in HTML", config.Title)
|
||||
}
|
||||
if !strings.Contains(body, config.SpecURL) {
|
||||
t.Errorf("Expected spec URL '%s' in HTML", config.SpecURL)
|
||||
}
|
||||
if !strings.Contains(body, "swagger-ui-dist") {
|
||||
t.Error("Expected Swagger UI CDN link")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUIHandler_RapiDoc(t *testing.T) {
|
||||
config := UIConfig{
|
||||
UIType: RapiDoc,
|
||||
SpecURL: "/api/spec",
|
||||
Title: "RapiDoc Test",
|
||||
}
|
||||
|
||||
handler := UIHandler(config)
|
||||
req := httptest.NewRequest("GET", "/docs", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
handler(w, req)
|
||||
|
||||
resp := w.Result()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Errorf("Expected status 200, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
body := w.Body.String()
|
||||
|
||||
// Check for RapiDoc specific content
|
||||
if !strings.Contains(body, "rapi-doc") {
|
||||
t.Error("Expected rapi-doc element")
|
||||
}
|
||||
if !strings.Contains(body, "rapidoc-min.js") {
|
||||
t.Error("Expected RapiDoc script")
|
||||
}
|
||||
if !strings.Contains(body, config.Title) {
|
||||
t.Errorf("Expected title '%s' in HTML", config.Title)
|
||||
}
|
||||
if !strings.Contains(body, config.SpecURL) {
|
||||
t.Errorf("Expected spec URL '%s' in HTML", config.SpecURL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUIHandler_Redoc(t *testing.T) {
|
||||
config := UIConfig{
|
||||
UIType: Redoc,
|
||||
SpecURL: "/spec.json",
|
||||
Title: "Redoc Test",
|
||||
}
|
||||
|
||||
handler := UIHandler(config)
|
||||
req := httptest.NewRequest("GET", "/docs", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
handler(w, req)
|
||||
|
||||
resp := w.Result()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Errorf("Expected status 200, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
body := w.Body.String()
|
||||
|
||||
// Check for Redoc specific content
|
||||
if !strings.Contains(body, "<redoc") {
|
||||
t.Error("Expected redoc element")
|
||||
}
|
||||
if !strings.Contains(body, "redoc.standalone.js") {
|
||||
t.Error("Expected Redoc script")
|
||||
}
|
||||
if !strings.Contains(body, config.Title) {
|
||||
t.Errorf("Expected title '%s' in HTML", config.Title)
|
||||
}
|
||||
if !strings.Contains(body, config.SpecURL) {
|
||||
t.Errorf("Expected spec URL '%s' in HTML", config.SpecURL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUIHandler_Scalar(t *testing.T) {
|
||||
config := UIConfig{
|
||||
UIType: Scalar,
|
||||
SpecURL: "/openapi.json",
|
||||
Title: "Scalar Test",
|
||||
}
|
||||
|
||||
handler := UIHandler(config)
|
||||
req := httptest.NewRequest("GET", "/docs", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
handler(w, req)
|
||||
|
||||
resp := w.Result()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Errorf("Expected status 200, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
body := w.Body.String()
|
||||
|
||||
// Check for Scalar specific content
|
||||
if !strings.Contains(body, "api-reference") {
|
||||
t.Error("Expected api-reference element")
|
||||
}
|
||||
if !strings.Contains(body, "@scalar/api-reference") {
|
||||
t.Error("Expected Scalar script")
|
||||
}
|
||||
if !strings.Contains(body, config.Title) {
|
||||
t.Errorf("Expected title '%s' in HTML", config.Title)
|
||||
}
|
||||
if !strings.Contains(body, config.SpecURL) {
|
||||
t.Errorf("Expected spec URL '%s' in HTML", config.SpecURL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUIHandler_DefaultValues(t *testing.T) {
|
||||
// Test with empty config to check defaults
|
||||
config := UIConfig{}
|
||||
|
||||
handler := UIHandler(config)
|
||||
req := httptest.NewRequest("GET", "/docs", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
handler(w, req)
|
||||
|
||||
resp := w.Result()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Errorf("Expected status 200, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
body := w.Body.String()
|
||||
|
||||
// Should default to Swagger UI
|
||||
if !strings.Contains(body, "swagger-ui") {
|
||||
t.Error("Expected default to Swagger UI")
|
||||
}
|
||||
|
||||
// Should default to /openapi spec URL
|
||||
if !strings.Contains(body, "/openapi") {
|
||||
t.Error("Expected default spec URL '/openapi'")
|
||||
}
|
||||
|
||||
// Should default to "API Documentation" title
|
||||
if !strings.Contains(body, "API Documentation") {
|
||||
t.Error("Expected default title 'API Documentation'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUIHandler_CustomCSS(t *testing.T) {
|
||||
customCSS := ".custom-class { color: red; }"
|
||||
config := UIConfig{
|
||||
UIType: SwaggerUI,
|
||||
CustomCSS: customCSS,
|
||||
}
|
||||
|
||||
handler := UIHandler(config)
|
||||
req := httptest.NewRequest("GET", "/docs", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
handler(w, req)
|
||||
|
||||
body := w.Body.String()
|
||||
|
||||
if !strings.Contains(body, customCSS) {
|
||||
t.Errorf("Expected custom CSS to be included. Body:\n%s", body)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUIHandler_Favicon(t *testing.T) {
|
||||
faviconURL := "https://example.com/favicon.ico"
|
||||
config := UIConfig{
|
||||
UIType: SwaggerUI,
|
||||
FaviconURL: faviconURL,
|
||||
}
|
||||
|
||||
handler := UIHandler(config)
|
||||
req := httptest.NewRequest("GET", "/docs", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
handler(w, req)
|
||||
|
||||
body := w.Body.String()
|
||||
|
||||
if !strings.Contains(body, faviconURL) {
|
||||
t.Error("Expected favicon URL to be included")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUIHandler_DarkTheme(t *testing.T) {
|
||||
config := UIConfig{
|
||||
UIType: SwaggerUI,
|
||||
Theme: "dark",
|
||||
}
|
||||
|
||||
handler := UIHandler(config)
|
||||
req := httptest.NewRequest("GET", "/docs", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
handler(w, req)
|
||||
|
||||
body := w.Body.String()
|
||||
|
||||
// SwaggerUI uses monokai theme for dark mode
|
||||
if !strings.Contains(body, "monokai") {
|
||||
t.Error("Expected dark theme configuration for Swagger UI")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUIHandler_InvalidUIType(t *testing.T) {
|
||||
config := UIConfig{
|
||||
UIType: "invalid-ui-type",
|
||||
}
|
||||
|
||||
handler := UIHandler(config)
|
||||
req := httptest.NewRequest("GET", "/docs", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
handler(w, req)
|
||||
|
||||
resp := w.Result()
|
||||
if resp.StatusCode != http.StatusBadRequest {
|
||||
t.Errorf("Expected status 400 for invalid UI type, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUIHandler_ContentType(t *testing.T) {
|
||||
config := UIConfig{
|
||||
UIType: SwaggerUI,
|
||||
}
|
||||
|
||||
handler := UIHandler(config)
|
||||
req := httptest.NewRequest("GET", "/docs", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
handler(w, req)
|
||||
|
||||
contentType := w.Header().Get("Content-Type")
|
||||
if !strings.Contains(contentType, "text/html") {
|
||||
t.Errorf("Expected Content-Type to contain 'text/html', got '%s'", contentType)
|
||||
}
|
||||
if !strings.Contains(contentType, "charset=utf-8") {
|
||||
t.Errorf("Expected Content-Type to contain 'charset=utf-8', got '%s'", contentType)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetupUIRoute(t *testing.T) {
|
||||
router := mux.NewRouter()
|
||||
|
||||
config := UIConfig{
|
||||
UIType: SwaggerUI,
|
||||
}
|
||||
|
||||
SetupUIRoute(router, "/api-docs", config)
|
||||
|
||||
// Test that the route was added and works
|
||||
req := httptest.NewRequest("GET", "/api-docs", nil)
|
||||
w := httptest.NewRecorder()
|
||||
router.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Errorf("Expected status 200, got %d", w.Code)
|
||||
}
|
||||
|
||||
// Verify it returns HTML
|
||||
body := w.Body.String()
|
||||
if !strings.Contains(body, "swagger-ui") {
|
||||
t.Error("Expected Swagger UI content")
|
||||
}
|
||||
}
|
||||
118
pkg/resolvespec/cache_helpers.go
Normal file
118
pkg/resolvespec/cache_helpers.go
Normal file
@@ -0,0 +1,118 @@
|
||||
package resolvespec
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/cache"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
)
|
||||
|
||||
// queryCacheKey represents the components used to build a cache key for query total count
|
||||
type queryCacheKey struct {
|
||||
TableName string `json:"table_name"`
|
||||
Filters []common.FilterOption `json:"filters"`
|
||||
Sort []common.SortOption `json:"sort"`
|
||||
CustomSQLWhere string `json:"custom_sql_where,omitempty"`
|
||||
CustomSQLOr string `json:"custom_sql_or,omitempty"`
|
||||
CursorForward string `json:"cursor_forward,omitempty"`
|
||||
CursorBackward string `json:"cursor_backward,omitempty"`
|
||||
}
|
||||
|
||||
// cachedTotal represents a cached total count
|
||||
type cachedTotal struct {
|
||||
Total int `json:"total"`
|
||||
}
|
||||
|
||||
// buildQueryCacheKey builds a cache key from query parameters for total count caching
|
||||
func buildQueryCacheKey(tableName string, filters []common.FilterOption, sort []common.SortOption, customWhere, customOr string) string {
|
||||
key := queryCacheKey{
|
||||
TableName: tableName,
|
||||
Filters: filters,
|
||||
Sort: sort,
|
||||
CustomSQLWhere: customWhere,
|
||||
CustomSQLOr: customOr,
|
||||
}
|
||||
|
||||
// Serialize to JSON for consistent hashing
|
||||
jsonData, err := json.Marshal(key)
|
||||
if err != nil {
|
||||
// Fallback to simple string concatenation if JSON fails
|
||||
return hashString(fmt.Sprintf("%s_%v_%v_%s_%s", tableName, filters, sort, customWhere, customOr))
|
||||
}
|
||||
|
||||
return hashString(string(jsonData))
|
||||
}
|
||||
|
||||
// buildExtendedQueryCacheKey builds a cache key for extended query options with cursor pagination
|
||||
func buildExtendedQueryCacheKey(tableName string, filters []common.FilterOption, sort []common.SortOption,
|
||||
customWhere, customOr string, cursorFwd, cursorBwd string) string {
|
||||
|
||||
key := queryCacheKey{
|
||||
TableName: tableName,
|
||||
Filters: filters,
|
||||
Sort: sort,
|
||||
CustomSQLWhere: customWhere,
|
||||
CustomSQLOr: customOr,
|
||||
CursorForward: cursorFwd,
|
||||
CursorBackward: cursorBwd,
|
||||
}
|
||||
|
||||
// Serialize to JSON for consistent hashing
|
||||
jsonData, err := json.Marshal(key)
|
||||
if err != nil {
|
||||
// Fallback to simple string concatenation if JSON fails
|
||||
return hashString(fmt.Sprintf("%s_%v_%v_%s_%s_%s_%s",
|
||||
tableName, filters, sort, customWhere, customOr, cursorFwd, cursorBwd))
|
||||
}
|
||||
|
||||
return hashString(string(jsonData))
|
||||
}
|
||||
|
||||
// hashString computes SHA256 hash of a string
|
||||
func hashString(s string) string {
|
||||
h := sha256.New()
|
||||
h.Write([]byte(s))
|
||||
return hex.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
// getQueryTotalCacheKey returns a formatted cache key for storing/retrieving total count
|
||||
func getQueryTotalCacheKey(hash string) string {
|
||||
return fmt.Sprintf("query_total:%s", hash)
|
||||
}
|
||||
|
||||
// buildCacheTags creates cache tags from schema and table name
|
||||
func buildCacheTags(schema, tableName string) []string {
|
||||
return []string{
|
||||
fmt.Sprintf("schema:%s", strings.ToLower(schema)),
|
||||
fmt.Sprintf("table:%s", strings.ToLower(tableName)),
|
||||
}
|
||||
}
|
||||
|
||||
// setQueryTotalCache stores a query total in the cache with schema and table tags
|
||||
func setQueryTotalCache(ctx context.Context, cacheKey string, total int, schema, tableName string, ttl time.Duration) error {
|
||||
c := cache.GetDefaultCache()
|
||||
cacheData := cachedTotal{Total: total}
|
||||
tags := buildCacheTags(schema, tableName)
|
||||
|
||||
return c.SetWithTags(ctx, cacheKey, cacheData, ttl, tags)
|
||||
}
|
||||
|
||||
// invalidateCacheForTags removes all cached items matching the specified tags
|
||||
func invalidateCacheForTags(ctx context.Context, tags []string) error {
|
||||
c := cache.GetDefaultCache()
|
||||
|
||||
// Invalidate for each tag
|
||||
for _, tag := range tags {
|
||||
if err := c.DeleteByTag(ctx, tag); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -331,19 +331,17 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
// Use extended cache key if cursors are present
|
||||
var cacheKeyHash string
|
||||
if len(options.CursorForward) > 0 || len(options.CursorBackward) > 0 {
|
||||
cacheKeyHash = cache.BuildExtendedQueryCacheKey(
|
||||
cacheKeyHash = buildExtendedQueryCacheKey(
|
||||
tableName,
|
||||
options.Filters,
|
||||
options.Sort,
|
||||
"", // No custom SQL WHERE in resolvespec
|
||||
"", // No custom SQL OR in resolvespec
|
||||
nil, // No expand options in resolvespec
|
||||
false, // distinct not used here
|
||||
"", // No custom SQL WHERE in resolvespec
|
||||
"", // No custom SQL OR in resolvespec
|
||||
options.CursorForward,
|
||||
options.CursorBackward,
|
||||
)
|
||||
} else {
|
||||
cacheKeyHash = cache.BuildQueryCacheKey(
|
||||
cacheKeyHash = buildQueryCacheKey(
|
||||
tableName,
|
||||
options.Filters,
|
||||
options.Sort,
|
||||
@@ -351,10 +349,10 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
"", // No custom SQL OR in resolvespec
|
||||
)
|
||||
}
|
||||
cacheKey := cache.GetQueryTotalCacheKey(cacheKeyHash)
|
||||
cacheKey := getQueryTotalCacheKey(cacheKeyHash)
|
||||
|
||||
// Try to retrieve from cache
|
||||
var cachedTotal cache.CachedTotal
|
||||
var cachedTotal cachedTotal
|
||||
err := cache.GetDefaultCache().Get(ctx, cacheKey, &cachedTotal)
|
||||
if err == nil {
|
||||
total = cachedTotal.Total
|
||||
@@ -371,10 +369,9 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
total = count
|
||||
logger.Debug("Total records (from query): %d", total)
|
||||
|
||||
// Store in cache
|
||||
// Store in cache with schema and table tags
|
||||
cacheTTL := time.Minute * 2 // Default 2 minutes TTL
|
||||
cacheData := cache.CachedTotal{Total: total}
|
||||
if err := cache.GetDefaultCache().Set(ctx, cacheKey, cacheData, cacheTTL); err != nil {
|
||||
if err := setQueryTotalCache(ctx, cacheKey, total, schema, tableName, cacheTTL); err != nil {
|
||||
logger.Warn("Failed to cache query total: %v", err)
|
||||
// Don't fail the request if caching fails
|
||||
} else {
|
||||
@@ -464,6 +461,11 @@ func (h *Handler) handleCreate(ctx context.Context, w common.ResponseWriter, dat
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully created record with nested data, ID: %v", result.ID)
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, result.Data, nil)
|
||||
return
|
||||
}
|
||||
@@ -480,6 +482,11 @@ func (h *Handler) handleCreate(ctx context.Context, w common.ResponseWriter, dat
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully created record, rows affected: %d", result.RowsAffected())
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, v, nil)
|
||||
|
||||
case []map[string]interface{}:
|
||||
@@ -518,6 +525,11 @@ func (h *Handler) handleCreate(ctx context.Context, w common.ResponseWriter, dat
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully created %d records with nested data", len(results))
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, results, nil)
|
||||
return
|
||||
}
|
||||
@@ -541,6 +553,11 @@ func (h *Handler) handleCreate(ctx context.Context, w common.ResponseWriter, dat
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully created %d records", len(v))
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, v, nil)
|
||||
|
||||
case []interface{}:
|
||||
@@ -584,6 +601,11 @@ func (h *Handler) handleCreate(ctx context.Context, w common.ResponseWriter, dat
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully created %d records with nested data", len(results))
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, results, nil)
|
||||
return
|
||||
}
|
||||
@@ -611,6 +633,11 @@ func (h *Handler) handleCreate(ctx context.Context, w common.ResponseWriter, dat
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully created %d records", len(v))
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, list, nil)
|
||||
|
||||
default:
|
||||
@@ -661,6 +688,11 @@ func (h *Handler) handleUpdate(ctx context.Context, w common.ResponseWriter, url
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully updated record with nested data, rows: %d", result.AffectedRows)
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, result.Data, nil)
|
||||
return
|
||||
}
|
||||
@@ -697,6 +729,11 @@ func (h *Handler) handleUpdate(ctx context.Context, w common.ResponseWriter, url
|
||||
}
|
||||
|
||||
logger.Info("Successfully updated %d records", result.RowsAffected())
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, data, nil)
|
||||
|
||||
case []map[string]interface{}:
|
||||
@@ -735,6 +772,11 @@ func (h *Handler) handleUpdate(ctx context.Context, w common.ResponseWriter, url
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully updated %d records with nested data", len(results))
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, results, nil)
|
||||
return
|
||||
}
|
||||
@@ -758,6 +800,11 @@ func (h *Handler) handleUpdate(ctx context.Context, w common.ResponseWriter, url
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully updated %d records", len(updates))
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, updates, nil)
|
||||
|
||||
case []interface{}:
|
||||
@@ -800,6 +847,11 @@ func (h *Handler) handleUpdate(ctx context.Context, w common.ResponseWriter, url
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully updated %d records with nested data", len(results))
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, results, nil)
|
||||
return
|
||||
}
|
||||
@@ -827,6 +879,11 @@ func (h *Handler) handleUpdate(ctx context.Context, w common.ResponseWriter, url
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully updated %d records", len(list))
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, list, nil)
|
||||
|
||||
default:
|
||||
@@ -873,6 +930,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully deleted %d records", len(v))
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, map[string]interface{}{"deleted": len(v)}, nil)
|
||||
return
|
||||
|
||||
@@ -914,6 +976,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully deleted %d records", deletedCount)
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, map[string]interface{}{"deleted": deletedCount}, nil)
|
||||
return
|
||||
|
||||
@@ -940,6 +1007,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully deleted %d records", deletedCount)
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, map[string]interface{}{"deleted": deletedCount}, nil)
|
||||
return
|
||||
|
||||
@@ -998,6 +1070,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
|
||||
|
||||
logger.Info("Successfully deleted record with ID: %s", id)
|
||||
// Return the deleted record data
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, recordToDelete, nil)
|
||||
}
|
||||
|
||||
|
||||
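All of the mutation paths above repeat the same three steps: log the result, build schema/table tags, and invalidate cached query totals before the response is written. A minimal sketch of that shared pattern, assuming only a tag-based cache with a `DeleteByTag` method (the concrete cache provider is not part of this hunk):

```go
package example

import (
	"context"
	"fmt"
	"log"
	"strings"
)

// tagCache is an assumed minimal interface; the real cache provider may
// expose a different surface.
type tagCache interface {
	DeleteByTag(ctx context.Context, tag string) error
}

// buildCacheTags mirrors the helper in this change set: one tag per schema
// and one per table, lower-cased for stable matching.
func buildCacheTags(schema, tableName string) []string {
	return []string{
		fmt.Sprintf("schema:%s", strings.ToLower(schema)),
		fmt.Sprintf("table:%s", strings.ToLower(tableName)),
	}
}

// invalidateAfterWrite is the shared write-path step: invalidate every tag,
// log failures, and never fail the request because of the cache.
func invalidateAfterWrite(ctx context.Context, c tagCache, schema, table string) {
	for _, tag := range buildCacheTags(schema, table) {
		if err := c.DeleteByTag(ctx, tag); err != nil {
			log.Printf("cache invalidation failed for %s.%s (tag %s): %v", schema, table, tag, err)
			return
		}
	}
}
```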
@@ -1,4 +1,4 @@
|
||||
package cache
|
||||
package restheadspec
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -7,56 +7,42 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/cache"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
)
|
||||
|
||||
// QueryCacheKey represents the components used to build a cache key for query total count
|
||||
type QueryCacheKey struct {
|
||||
// expandOptionKey represents expand options for cache key
|
||||
type expandOptionKey struct {
|
||||
Relation string `json:"relation"`
|
||||
Where string `json:"where,omitempty"`
|
||||
}
|
||||
|
||||
// queryCacheKey represents the components used to build a cache key for query total count
|
||||
type queryCacheKey struct {
|
||||
TableName string `json:"table_name"`
|
||||
Filters []common.FilterOption `json:"filters"`
|
||||
Sort []common.SortOption `json:"sort"`
|
||||
CustomSQLWhere string `json:"custom_sql_where,omitempty"`
|
||||
CustomSQLOr string `json:"custom_sql_or,omitempty"`
|
||||
Expand []ExpandOptionKey `json:"expand,omitempty"`
|
||||
Expand []expandOptionKey `json:"expand,omitempty"`
|
||||
Distinct bool `json:"distinct,omitempty"`
|
||||
CursorForward string `json:"cursor_forward,omitempty"`
|
||||
CursorBackward string `json:"cursor_backward,omitempty"`
|
||||
}
|
||||
|
||||
// ExpandOptionKey represents expand options for cache key
|
||||
type ExpandOptionKey struct {
|
||||
Relation string `json:"relation"`
|
||||
Where string `json:"where,omitempty"`
|
||||
// cachedTotal represents a cached total count
|
||||
type cachedTotal struct {
|
||||
Total int `json:"total"`
|
||||
}
|
||||
|
||||
// BuildQueryCacheKey builds a cache key from query parameters for total count caching
|
||||
// This is used to cache the total count of records matching a query
|
||||
func BuildQueryCacheKey(tableName string, filters []common.FilterOption, sort []common.SortOption, customWhere, customOr string) string {
|
||||
key := QueryCacheKey{
|
||||
TableName: tableName,
|
||||
Filters: filters,
|
||||
Sort: sort,
|
||||
CustomSQLWhere: customWhere,
|
||||
CustomSQLOr: customOr,
|
||||
}
|
||||
|
||||
// Serialize to JSON for consistent hashing
|
||||
jsonData, err := json.Marshal(key)
|
||||
if err != nil {
|
||||
// Fallback to simple string concatenation if JSON fails
|
||||
return hashString(fmt.Sprintf("%s_%v_%v_%s_%s", tableName, filters, sort, customWhere, customOr))
|
||||
}
|
||||
|
||||
return hashString(string(jsonData))
|
||||
}
|
||||
|
||||
// BuildExtendedQueryCacheKey builds a cache key for extended query options (restheadspec)
|
||||
// buildExtendedQueryCacheKey builds a cache key for extended query options (restheadspec)
|
||||
// Includes expand, distinct, and cursor pagination options
|
||||
func BuildExtendedQueryCacheKey(tableName string, filters []common.FilterOption, sort []common.SortOption,
|
||||
func buildExtendedQueryCacheKey(tableName string, filters []common.FilterOption, sort []common.SortOption,
|
||||
customWhere, customOr string, expandOpts []interface{}, distinct bool, cursorFwd, cursorBwd string) string {
|
||||
|
||||
key := QueryCacheKey{
|
||||
key := queryCacheKey{
|
||||
TableName: tableName,
|
||||
Filters: filters,
|
||||
Sort: sort,
|
||||
@@ -69,11 +55,11 @@ func BuildExtendedQueryCacheKey(tableName string, filters []common.FilterOption,
|
||||
|
||||
// Convert expand options to cache key format
|
||||
if len(expandOpts) > 0 {
|
||||
key.Expand = make([]ExpandOptionKey, 0, len(expandOpts))
|
||||
key.Expand = make([]expandOptionKey, 0, len(expandOpts))
|
||||
for _, exp := range expandOpts {
|
||||
// Type assert to get the expand option fields we care about for caching
|
||||
if expMap, ok := exp.(map[string]interface{}); ok {
|
||||
expKey := ExpandOptionKey{}
|
||||
expKey := expandOptionKey{}
|
||||
if rel, ok := expMap["relation"].(string); ok {
|
||||
expKey.Relation = rel
|
||||
}
|
||||
@@ -83,7 +69,6 @@ func BuildExtendedQueryCacheKey(tableName string, filters []common.FilterOption,
|
||||
key.Expand = append(key.Expand, expKey)
|
||||
}
|
||||
}
|
||||
// Sort expand options for consistent hashing (already sorted by relation name above)
|
||||
}
|
||||
|
||||
// Serialize to JSON for consistent hashing
|
||||
@@ -104,24 +89,38 @@ func hashString(s string) string {
|
||||
return hex.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
// GetQueryTotalCacheKey returns a formatted cache key for storing/retrieving total count
|
||||
func GetQueryTotalCacheKey(hash string) string {
|
||||
// getQueryTotalCacheKey returns a formatted cache key for storing/retrieving total count
|
||||
func getQueryTotalCacheKey(hash string) string {
|
||||
return fmt.Sprintf("query_total:%s", hash)
|
||||
}
|
||||
|
||||
// CachedTotal represents a cached total count
|
||||
type CachedTotal struct {
|
||||
Total int `json:"total"`
|
||||
// buildCacheTags creates cache tags from schema and table name
|
||||
func buildCacheTags(schema, tableName string) []string {
|
||||
return []string{
|
||||
fmt.Sprintf("schema:%s", strings.ToLower(schema)),
|
||||
fmt.Sprintf("table:%s", strings.ToLower(tableName)),
|
||||
}
|
||||
}
|
||||
|
||||
// InvalidateCacheForTable removes all cached totals for a specific table
|
||||
// This should be called when data in the table changes (insert/update/delete)
|
||||
func InvalidateCacheForTable(ctx context.Context, tableName string) error {
|
||||
cache := GetDefaultCache()
|
||||
// setQueryTotalCache stores a query total in the cache with schema and table tags
|
||||
func setQueryTotalCache(ctx context.Context, cacheKey string, total int, schema, tableName string, ttl time.Duration) error {
|
||||
c := cache.GetDefaultCache()
|
||||
cacheData := cachedTotal{Total: total}
|
||||
tags := buildCacheTags(schema, tableName)
|
||||
|
||||
// Build a pattern to match all query totals for this table
|
||||
// Note: This requires pattern matching support in the provider
|
||||
pattern := fmt.Sprintf("query_total:*%s*", strings.ToLower(tableName))
|
||||
|
||||
return cache.DeleteByPattern(ctx, pattern)
|
||||
return c.SetWithTags(ctx, cacheKey, cacheData, ttl, tags)
|
||||
}
|
||||
|
||||
// invalidateCacheForTags removes all cached items matching the specified tags
|
||||
func invalidateCacheForTags(ctx context.Context, tags []string) error {
|
||||
c := cache.GetDefaultCache()
|
||||
|
||||
// Invalidate for each tag
|
||||
for _, tag := range tags {
|
||||
if err := c.DeleteByTag(ctx, tag); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
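Compared with the old pattern-based `InvalidateCacheForTable`, the new helpers attach tags when a value is stored (`SetWithTags`) and drop entries by tag on invalidation, so no key-pattern scan is needed. A toy in-memory illustration of that idea; it is not the project's cache provider, just a sketch of the tag index:

```go
package example

import (
	"context"
	"sync"
	"time"
)

// memTagCache is a toy tag-indexed cache used only to illustrate the idea.
type memTagCache struct {
	mu     sync.Mutex
	values map[string]interface{}
	byTag  map[string]map[string]struct{} // tag -> set of keys
}

func newMemTagCache() *memTagCache {
	return &memTagCache{
		values: make(map[string]interface{}),
		byTag:  make(map[string]map[string]struct{}),
	}
}

// SetWithTags stores a value and records which tags reference its key.
// TTL handling is omitted to keep the sketch short.
func (c *memTagCache) SetWithTags(_ context.Context, key string, val interface{}, _ time.Duration, tags []string) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.values[key] = val
	for _, t := range tags {
		if c.byTag[t] == nil {
			c.byTag[t] = make(map[string]struct{})
		}
		c.byTag[t][key] = struct{}{}
	}
	return nil
}

// DeleteByTag removes every key recorded under the tag -- no pattern scan needed.
func (c *memTagCache) DeleteByTag(_ context.Context, tag string) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	for key := range c.byTag[tag] {
		delete(c.values, key)
	}
	delete(c.byTag, tag)
	return nil
}
```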
@@ -482,8 +482,10 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
// Apply custom SQL WHERE clause (AND condition)
|
||||
if options.CustomSQLWhere != "" {
|
||||
logger.Debug("Applying custom SQL WHERE: %s", options.CustomSQLWhere)
|
||||
// Sanitize and allow preload table prefixes since custom SQL may reference multiple tables
|
||||
sanitizedWhere := common.SanitizeWhereClause(options.CustomSQLWhere, reflection.ExtractTableNameOnly(tableName), &options.RequestOptions)
|
||||
// First add table prefixes to unqualified columns (but skip columns inside function calls)
|
||||
prefixedWhere := common.AddTablePrefixToColumns(options.CustomSQLWhere, reflection.ExtractTableNameOnly(tableName))
|
||||
// Then sanitize and allow preload table prefixes since custom SQL may reference multiple tables
|
||||
sanitizedWhere := common.SanitizeWhereClause(prefixedWhere, reflection.ExtractTableNameOnly(tableName), &options.RequestOptions)
|
||||
if sanitizedWhere != "" {
|
||||
query = query.Where(sanitizedWhere)
|
||||
}
|
||||
@@ -492,8 +494,9 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
// Apply custom SQL WHERE clause (OR condition)
|
||||
if options.CustomSQLOr != "" {
|
||||
logger.Debug("Applying custom SQL OR: %s", options.CustomSQLOr)
|
||||
customOr := common.AddTablePrefixToColumns(options.CustomSQLOr, reflection.ExtractTableNameOnly(tableName))
|
||||
// Sanitize and allow preload table prefixes since custom SQL may reference multiple tables
|
||||
sanitizedOr := common.SanitizeWhereClause(options.CustomSQLOr, reflection.ExtractTableNameOnly(tableName), &options.RequestOptions)
|
||||
sanitizedOr := common.SanitizeWhereClause(customOr, reflection.ExtractTableNameOnly(tableName), &options.RequestOptions)
|
||||
if sanitizedOr != "" {
|
||||
query = query.WhereOr(sanitizedOr)
|
||||
}
|
||||
@@ -529,7 +532,7 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
var total int
|
||||
if !options.SkipCount {
|
||||
// Try to get from cache first (unless SkipCache is true)
|
||||
var cachedTotal *cache.CachedTotal
|
||||
var cachedTotalData *cachedTotal
|
||||
var cacheKey string
|
||||
|
||||
if !options.SkipCache {
|
||||
@@ -543,7 +546,7 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
}
|
||||
}
|
||||
|
||||
cacheKeyHash := cache.BuildExtendedQueryCacheKey(
|
||||
cacheKeyHash := buildExtendedQueryCacheKey(
|
||||
tableName,
|
||||
options.Filters,
|
||||
options.Sort,
|
||||
@@ -554,22 +557,22 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
options.CursorForward,
|
||||
options.CursorBackward,
|
||||
)
|
||||
cacheKey = cache.GetQueryTotalCacheKey(cacheKeyHash)
|
||||
cacheKey = getQueryTotalCacheKey(cacheKeyHash)
|
||||
|
||||
// Try to retrieve from cache
|
||||
cachedTotal = &cache.CachedTotal{}
|
||||
err := cache.GetDefaultCache().Get(ctx, cacheKey, cachedTotal)
|
||||
cachedTotalData = &cachedTotal{}
|
||||
err := cache.GetDefaultCache().Get(ctx, cacheKey, cachedTotalData)
|
||||
if err == nil {
|
||||
total = cachedTotal.Total
|
||||
total = cachedTotalData.Total
|
||||
logger.Debug("Total records (from cache): %d", total)
|
||||
} else {
|
||||
logger.Debug("Cache miss for query total")
|
||||
cachedTotal = nil
|
||||
cachedTotalData = nil
|
||||
}
|
||||
}
|
||||
|
||||
// If not in cache or cache skip, execute count query
|
||||
if cachedTotal == nil {
|
||||
if cachedTotalData == nil {
|
||||
count, err := query.Count(ctx)
|
||||
if err != nil {
|
||||
logger.Error("Error counting records: %v", err)
|
||||
@@ -579,11 +582,10 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
total = count
|
||||
logger.Debug("Total records (from query): %d", total)
|
||||
|
||||
// Store in cache (if caching is enabled)
|
||||
// Store in cache with schema and table tags (if caching is enabled)
|
||||
if !options.SkipCache && cacheKey != "" {
|
||||
cacheTTL := time.Minute * 2 // Default 2 minutes TTL
|
||||
cacheData := &cache.CachedTotal{Total: total}
|
||||
if err := cache.GetDefaultCache().Set(ctx, cacheKey, cacheData, cacheTTL); err != nil {
|
||||
if err := setQueryTotalCache(ctx, cacheKey, total, schema, tableName, cacheTTL); err != nil {
|
||||
logger.Warn("Failed to cache query total: %v", err)
|
||||
// Don't fail the request if caching fails
|
||||
} else {
|
||||
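The read path is the other half of the scheme: look up the cached total by the hashed query key, fall back to a COUNT query on a miss, then store the result with the schema/table tags so the write paths above can invalidate it. A compressed sketch of that flow, with the real cache and ORM calls replaced by stand-in function parameters:

```go
package example

import (
	"context"
	"time"
)

type cachedTotal struct {
	Total int `json:"total"`
}

// getTotal shows the cached-count flow: cache hit, or count + cache fill.
// getCached, countRows and storeWithTags stand in for the real cache and
// query calls, which are not reproduced here.
func getTotal(
	ctx context.Context,
	cacheKey string,
	skipCache bool,
	getCached func(ctx context.Context, key string) (*cachedTotal, bool),
	countRows func(ctx context.Context) (int, error),
	storeWithTags func(ctx context.Context, key string, total int, ttl time.Duration) error,
) (int, error) {
	if !skipCache {
		if hit, ok := getCached(ctx, cacheKey); ok {
			return hit.Total, nil // served from cache, no COUNT query
		}
	}
	total, err := countRows(ctx)
	if err != nil {
		return 0, err
	}
	if !skipCache {
		// A short TTL (2 minutes in this change) bounds staleness between writes.
		_ = storeWithTags(ctx, cacheKey, total, 2*time.Minute)
	}
	return total, nil
}
```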
@@ -1149,6 +1151,11 @@ func (h *Handler) handleCreate(ctx context.Context, w common.ResponseWriter, dat
|
||||
}
|
||||
|
||||
logger.Info("Successfully created %d record(s)", len(mergedResults))
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponseWithOptions(w, responseData, nil, &options)
|
||||
}
|
||||
|
||||
@@ -1320,6 +1327,11 @@ func (h *Handler) handleUpdate(ctx context.Context, w common.ResponseWriter, id
|
||||
}
|
||||
|
||||
logger.Info("Successfully updated record with ID: %v", targetID)
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponseWithOptions(w, mergedData, nil, &options)
|
||||
}
|
||||
|
||||
@@ -1388,6 +1400,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully deleted %d records", deletedCount)
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, map[string]interface{}{"deleted": deletedCount}, nil)
|
||||
return
|
||||
|
||||
@@ -1456,6 +1473,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully deleted %d records", deletedCount)
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, map[string]interface{}{"deleted": deletedCount}, nil)
|
||||
return
|
||||
|
||||
@@ -1510,6 +1532,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
|
||||
return
|
||||
}
|
||||
logger.Info("Successfully deleted %d records", deletedCount)
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, map[string]interface{}{"deleted": deletedCount}, nil)
|
||||
return
|
||||
|
||||
@@ -1611,6 +1638,11 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
|
||||
}
|
||||
|
||||
// Return the deleted record data
|
||||
// Invalidate cache for this table
|
||||
cacheTags := buildCacheTags(schema, tableName)
|
||||
if err := invalidateCacheForTags(ctx, cacheTags); err != nil {
|
||||
logger.Warn("Failed to invalidate cache for table %s: %v", tableName, err)
|
||||
}
|
||||
h.sendResponse(w, recordToDelete, nil)
|
||||
}
|
||||
|
||||
@@ -2107,14 +2139,20 @@ func (h *Handler) sendResponse(w common.ResponseWriter, data interface{}, metada
|
||||
|
||||
// sendResponseWithOptions sends a response with optional formatting
|
||||
func (h *Handler) sendResponseWithOptions(w common.ResponseWriter, data interface{}, metadata *common.Metadata, options *ExtendedRequestOptions) {
|
||||
w.SetHeader("Content-Type", "application/json")
|
||||
if data == nil {
|
||||
data = map[string]interface{}{}
|
||||
w.WriteHeader(http.StatusPartialContent)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
// Normalize single-record arrays to objects if requested
|
||||
if options != nil && options.SingleRecordAsObject {
|
||||
data = h.normalizeResultArray(data)
|
||||
}
|
||||
|
||||
// Return data as-is without wrapping in common.Response
|
||||
w.SetHeader("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
||||
if err := w.WriteJSON(data); err != nil {
|
||||
logger.Error("Failed to write JSON response: %v", err)
|
||||
}
|
||||
@@ -2144,12 +2182,30 @@ func (h *Handler) normalizeResultArray(data interface{}) interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
if dataValue.Kind() == reflect.String {
|
||||
str := dataValue.String()
|
||||
if str == "" || str == "null" {
|
||||
return map[string]interface{}{}
|
||||
}
|
||||
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
// sendFormattedResponse sends response with formatting options
|
||||
func (h *Handler) sendFormattedResponse(w common.ResponseWriter, data interface{}, metadata *common.Metadata, options ExtendedRequestOptions) {
|
||||
// Normalize single-record arrays to objects if requested
|
||||
httpStatus := http.StatusOK
|
||||
if data == nil {
|
||||
data = map[string]interface{}{}
|
||||
httpStatus = http.StatusPartialContent
|
||||
} else {
|
||||
dataLen := reflection.Len(data)
|
||||
if dataLen == 0 {
|
||||
httpStatus = http.StatusPartialContent
|
||||
}
|
||||
}
|
||||
|
||||
if options.SingleRecordAsObject {
|
||||
data = h.normalizeResultArray(data)
|
||||
}
|
||||
@@ -2168,7 +2224,7 @@ func (h *Handler) sendFormattedResponse(w common.ResponseWriter, data interface{
|
||||
switch options.ResponseFormat {
|
||||
case "simple":
|
||||
// Simple format: just return the data array
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.WriteHeader(httpStatus)
|
||||
if err := w.WriteJSON(data); err != nil {
|
||||
logger.Error("Failed to write JSON response: %v", err)
|
||||
}
|
||||
@@ -2180,7 +2236,7 @@ func (h *Handler) sendFormattedResponse(w common.ResponseWriter, data interface{
|
||||
if metadata != nil {
|
||||
response["count"] = metadata.Total
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.WriteHeader(httpStatus)
|
||||
if err := w.WriteJSON(response); err != nil {
|
||||
logger.Error("Failed to write JSON response: %v", err)
|
||||
}
|
||||
@@ -2191,7 +2247,7 @@ func (h *Handler) sendFormattedResponse(w common.ResponseWriter, data interface{
|
||||
Data: data,
|
||||
Metadata: metadata,
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.WriteHeader(httpStatus)
|
||||
if err := w.WriteJSON(response); err != nil {
|
||||
logger.Error("Failed to write JSON response: %v", err)
|
||||
}
|
||||
|
||||
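The response helpers now decide the HTTP status once: 206 Partial Content for a nil or empty payload, 200 OK otherwise, and that status is reused by every response format. A small standalone sketch of the decision, using a plain type switch in place of the `reflection.Len` helper:

```go
package example

import "net/http"

// responseStatus returns 206 Partial Content for nil or empty payloads and
// 200 OK otherwise, mirroring the logic added to sendFormattedResponse.
func responseStatus(data interface{}) int {
	if data == nil {
		return http.StatusPartialContent
	}
	switch v := data.(type) {
	case []interface{}:
		if len(v) == 0 {
			return http.StatusPartialContent
		}
	case []map[string]interface{}:
		if len(v) == 0 {
			return http.StatusPartialContent
		}
	case map[string]interface{}:
		if len(v) == 0 {
			return http.StatusPartialContent
		}
	case string:
		if v == "" || v == "null" {
			return http.StatusPartialContent
		}
	}
	return http.StatusOK
}
```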
@@ -1,3 +1,4 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package restheadspec
|
||||
@@ -21,12 +22,12 @@ import (
|
||||
|
||||
// Test models
|
||||
type TestUser struct {
|
||||
ID uint `gorm:"primaryKey" json:"id"`
|
||||
Name string `gorm:"not null" json:"name"`
|
||||
Email string `gorm:"uniqueIndex;not null" json:"email"`
|
||||
Age int `json:"age"`
|
||||
Active bool `gorm:"default:true" json:"active"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
ID uint `gorm:"primaryKey" json:"id"`
|
||||
Name string `gorm:"not null" json:"name"`
|
||||
Email string `gorm:"uniqueIndex;not null" json:"email"`
|
||||
Age int `json:"age"`
|
||||
Active bool `gorm:"default:true" json:"active"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
Posts []TestPost `gorm:"foreignKey:UserID" json:"posts,omitempty"`
|
||||
}
|
||||
|
||||
@@ -35,13 +36,13 @@ func (TestUser) TableName() string {
|
||||
}
|
||||
|
||||
type TestPost struct {
|
||||
ID uint `gorm:"primaryKey" json:"id"`
|
||||
UserID uint `gorm:"not null" json:"user_id"`
|
||||
Title string `gorm:"not null" json:"title"`
|
||||
Content string `json:"content"`
|
||||
Published bool `gorm:"default:false" json:"published"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
User *TestUser `gorm:"foreignKey:UserID" json:"user,omitempty"`
|
||||
ID uint `gorm:"primaryKey" json:"id"`
|
||||
UserID uint `gorm:"not null" json:"user_id"`
|
||||
Title string `gorm:"not null" json:"title"`
|
||||
Content string `json:"content"`
|
||||
Published bool `gorm:"default:false" json:"published"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
User *TestUser `gorm:"foreignKey:UserID" json:"user,omitempty"`
|
||||
Comments []TestComment `gorm:"foreignKey:PostID" json:"comments,omitempty"`
|
||||
}
|
||||
|
||||
@@ -54,7 +55,7 @@ type TestComment struct {
|
||||
PostID uint `gorm:"not null" json:"post_id"`
|
||||
Content string `gorm:"not null" json:"content"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
Post *TestPost `gorm:"foreignKey:PostID" json:"post,omitempty"`
|
||||
Post *TestPost `gorm:"foreignKey:PostID" json:"post,omitempty"`
|
||||
}
|
||||
|
||||
func (TestComment) TableName() string {
|
||||
@@ -401,7 +402,7 @@ func TestIntegration_GetMetadata(t *testing.T) {
|
||||
|
||||
muxRouter.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
if !(w.Code == http.StatusOK || w.Code == http.StatusPartialContent) {
|
||||
t.Errorf("Expected status 200, got %d. Body: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
@@ -492,7 +493,7 @@ func TestIntegration_QueryParamsOverHeaders(t *testing.T) {
|
||||
|
||||
muxRouter.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
if !(w.Code == http.StatusOK || w.Code == http.StatusPartialContent) {
|
||||
t.Errorf("Expected status 200, got %d", w.Code)
|
||||
}
|
||||
|
||||
|
||||
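The test file above is guarded by the `integration` build tag, so a plain `go test ./...` skips it; both constraint forms are kept for older toolchains. A minimal illustration (the package clause here is only a placeholder):

```go
//go:build integration
// +build integration

// Compiled only when the tag is supplied:
//   go test -tags=integration ./...
package restheadspec_test
```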
@@ -1,233 +1,314 @@
|
||||
# Server Package
|
||||
|
||||
Graceful HTTP server with request draining and shutdown coordination.
|
||||
Production-ready HTTP server manager with graceful shutdown, request draining, and comprehensive TLS/HTTPS support.
|
||||
|
||||
## Features
|
||||
|
||||
✅ **Multiple Server Management** - Run multiple HTTP/HTTPS servers concurrently
|
||||
✅ **Graceful Shutdown** - Handles SIGINT/SIGTERM with request draining
|
||||
✅ **Automatic Request Rejection** - New requests get 503 during shutdown
|
||||
✅ **Health & Readiness Endpoints** - Kubernetes-ready health checks
|
||||
✅ **Shutdown Callbacks** - Register cleanup functions (DB, cache, metrics)
|
||||
✅ **Comprehensive TLS Support**:
|
||||
- Certificate files (production)
|
||||
- Self-signed certificates (development/testing)
|
||||
- Let's Encrypt / AutoTLS (automatic certificate management)
|
||||
✅ **GZIP Compression** - Optional response compression
|
||||
✅ **Panic Recovery** - Automatic panic recovery middleware
|
||||
✅ **Configurable Timeouts** - Read, write, idle, drain, and shutdown timeouts
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Single Server
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/server"
|
||||
|
||||
// Create server
|
||||
srv := server.NewGracefulServer(server.Config{
|
||||
Addr: ":8080",
|
||||
Handler: router,
|
||||
// Create server manager
|
||||
mgr := server.NewManager()
|
||||
|
||||
// Add server
|
||||
_, err := mgr.Add(server.Config{
|
||||
Name: "api-server",
|
||||
Host: "localhost",
|
||||
Port: 8080,
|
||||
Handler: myRouter,
|
||||
GZIP: true,
|
||||
})
|
||||
|
||||
// Start server (blocks until shutdown signal)
|
||||
if err := srv.ListenAndServe(); err != nil {
|
||||
// Start and wait for shutdown signal
|
||||
if err := mgr.ServeWithGracefulShutdown(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
||||
## Features
|
||||
### Multiple Servers
|
||||
|
||||
✅ Graceful shutdown on SIGINT/SIGTERM
|
||||
✅ Request draining (waits for in-flight requests)
|
||||
✅ Automatic request rejection during shutdown
|
||||
✅ Health and readiness endpoints
|
||||
✅ Shutdown callbacks for cleanup
|
||||
✅ Configurable timeouts
|
||||
```go
|
||||
mgr := server.NewManager()
|
||||
|
||||
// Public API
|
||||
mgr.Add(server.Config{
|
||||
Name: "public-api",
|
||||
Port: 8080,
|
||||
Handler: publicRouter,
|
||||
})
|
||||
|
||||
// Admin API
|
||||
mgr.Add(server.Config{
|
||||
Name: "admin-api",
|
||||
Port: 8081,
|
||||
Handler: adminRouter,
|
||||
})
|
||||
|
||||
// Start all and wait
|
||||
mgr.ServeWithGracefulShutdown()
|
||||
```
|
||||
|
||||
## HTTPS/TLS Configuration
|
||||
|
||||
### Option 1: Certificate Files (Production)
|
||||
|
||||
```go
|
||||
mgr.Add(server.Config{
|
||||
Name: "https-server",
|
||||
Host: "0.0.0.0",
|
||||
Port: 443,
|
||||
Handler: handler,
|
||||
SSLCert: "/etc/ssl/certs/server.crt",
|
||||
SSLKey: "/etc/ssl/private/server.key",
|
||||
})
|
||||
```
|
||||
|
||||
### Option 2: Self-Signed Certificate (Development)
|
||||
|
||||
```go
|
||||
mgr.Add(server.Config{
|
||||
Name: "dev-server",
|
||||
Host: "localhost",
|
||||
Port: 8443,
|
||||
Handler: handler,
|
||||
SelfSignedSSL: true, // Auto-generates certificate
|
||||
})
|
||||
```
|
||||
|
||||
### Option 3: Let's Encrypt / AutoTLS (Production)
|
||||
|
||||
```go
|
||||
mgr.Add(server.Config{
|
||||
Name: "prod-server",
|
||||
Host: "0.0.0.0",
|
||||
Port: 443,
|
||||
Handler: handler,
|
||||
AutoTLS: true,
|
||||
AutoTLSDomains: []string{"example.com", "www.example.com"},
|
||||
AutoTLSEmail: "admin@example.com",
|
||||
AutoTLSCacheDir: "./certs-cache", // Certificate cache directory
|
||||
})
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
```go
|
||||
config := server.Config{
|
||||
// Server address
|
||||
Addr: ":8080",
|
||||
server.Config{
|
||||
// Basic configuration
|
||||
Name: "my-server", // Server name (required)
|
||||
Host: "0.0.0.0", // Bind address
|
||||
Port: 8080, // Port (required)
|
||||
Handler: myRouter, // HTTP handler (required)
|
||||
Description: "My API server", // Optional description
|
||||
|
||||
// HTTP handler
|
||||
Handler: myRouter,
|
||||
// Features
|
||||
GZIP: true, // Enable GZIP compression
|
||||
|
||||
// Maximum time for graceful shutdown (default: 30s)
|
||||
ShutdownTimeout: 30 * time.Second,
|
||||
// TLS/HTTPS (choose one option)
|
||||
SSLCert: "/path/to/cert.pem", // Certificate file
|
||||
SSLKey: "/path/to/key.pem", // Key file
|
||||
SelfSignedSSL: false, // Auto-generate self-signed cert
|
||||
AutoTLS: false, // Let's Encrypt
|
||||
AutoTLSDomains: []string{}, // Domains for AutoTLS
|
||||
AutoTLSEmail: "", // Email for Let's Encrypt
|
||||
AutoTLSCacheDir: "./certs-cache", // Cert cache directory
|
||||
|
||||
// Time to wait for in-flight requests (default: 25s)
|
||||
DrainTimeout: 25 * time.Second,
|
||||
|
||||
// Request read timeout (default: 10s)
|
||||
ReadTimeout: 10 * time.Second,
|
||||
|
||||
// Response write timeout (default: 10s)
|
||||
WriteTimeout: 10 * time.Second,
|
||||
|
||||
// Idle connection timeout (default: 120s)
|
||||
IdleTimeout: 120 * time.Second,
|
||||
// Timeouts
|
||||
ShutdownTimeout: 30 * time.Second, // Max shutdown time
|
||||
DrainTimeout: 25 * time.Second, // Request drain timeout
|
||||
ReadTimeout: 15 * time.Second, // Request read timeout
|
||||
WriteTimeout: 15 * time.Second, // Response write timeout
|
||||
IdleTimeout: 60 * time.Second, // Idle connection timeout
|
||||
}
|
||||
|
||||
srv := server.NewGracefulServer(config)
|
||||
```
|
||||
|
||||
## Shutdown Behavior
|
||||
## Graceful Shutdown
|
||||
|
||||
**Signal received (SIGINT/SIGTERM):**
|
||||
### Automatic (Recommended)
|
||||
|
||||
1. **Mark as shutting down** - New requests get 503
|
||||
2. **Drain requests** - Wait up to `DrainTimeout` for in-flight requests
|
||||
3. **Shutdown server** - Close listeners and connections
|
||||
4. **Execute callbacks** - Run registered cleanup functions
|
||||
```go
|
||||
mgr := server.NewManager()
|
||||
|
||||
// Add servers...
|
||||
|
||||
// This blocks until SIGINT/SIGTERM
|
||||
mgr.ServeWithGracefulShutdown()
|
||||
```
|
||||
Time Event
|
||||
─────────────────────────────────────────
|
||||
0s Signal received: SIGTERM
|
||||
├─ Mark as shutting down
|
||||
├─ Reject new requests (503)
|
||||
└─ Start draining...
|
||||
|
||||
1s In-flight: 50 requests
|
||||
2s In-flight: 32 requests
|
||||
3s In-flight: 12 requests
|
||||
4s In-flight: 3 requests
|
||||
5s In-flight: 0 requests ✓
|
||||
└─ All requests drained
|
||||
### Manual Control
|
||||
|
||||
5s Execute shutdown callbacks
|
||||
6s Shutdown complete
|
||||
```go
|
||||
mgr := server.NewManager()
|
||||
|
||||
// Add and start servers
|
||||
mgr.StartAll()
|
||||
|
||||
// Later... stop gracefully
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := mgr.StopAllWithContext(ctx); err != nil {
|
||||
log.Printf("Shutdown error: %v", err)
|
||||
}
|
||||
```
|
||||
|
||||
### Shutdown Callbacks
|
||||
|
||||
Register cleanup functions to run during shutdown:
|
||||
|
||||
```go
|
||||
// Close database
|
||||
mgr.RegisterShutdownCallback(func(ctx context.Context) error {
|
||||
log.Println("Closing database...")
|
||||
return db.Close()
|
||||
})
|
||||
|
||||
// Flush metrics
|
||||
mgr.RegisterShutdownCallback(func(ctx context.Context) error {
|
||||
log.Println("Flushing metrics...")
|
||||
return metrics.Flush(ctx)
|
||||
})
|
||||
|
||||
// Close cache
|
||||
mgr.RegisterShutdownCallback(func(ctx context.Context) error {
|
||||
log.Println("Closing cache...")
|
||||
return cache.Close()
|
||||
})
|
||||
```
|
||||
|
||||
## Health Checks
|
||||
|
||||
### Health Endpoint
|
||||
|
||||
Returns 200 when healthy, 503 when shutting down:
|
||||
### Adding Health Endpoints
|
||||
|
||||
```go
|
||||
router.HandleFunc("/health", srv.HealthCheckHandler())
|
||||
instance, _ := mgr.Add(server.Config{
|
||||
Name: "api-server",
|
||||
Port: 8080,
|
||||
Handler: router,
|
||||
})
|
||||
|
||||
// Add health endpoints to your router
|
||||
router.HandleFunc("/health", instance.HealthCheckHandler())
|
||||
router.HandleFunc("/ready", instance.ReadinessHandler())
|
||||
```
|
||||
|
||||
**Response (healthy):**
|
||||
### Health Endpoint
|
||||
|
||||
Returns server health status:
|
||||
|
||||
**Healthy (200 OK):**
|
||||
```json
|
||||
{"status":"healthy"}
|
||||
```
|
||||
|
||||
**Response (shutting down):**
|
||||
**Shutting Down (503 Service Unavailable):**
|
||||
```json
|
||||
{"status":"shutting_down"}
|
||||
```
|
||||
|
||||
### Readiness Endpoint
|
||||
|
||||
Includes in-flight request count:
|
||||
Returns readiness with in-flight request count:
|
||||
|
||||
```go
|
||||
router.HandleFunc("/ready", srv.ReadinessHandler())
|
||||
```
|
||||
|
||||
**Response:**
|
||||
**Ready (200 OK):**
|
||||
```json
|
||||
{"ready":true,"in_flight_requests":12}
|
||||
```
|
||||
|
||||
**During shutdown:**
|
||||
**Not Ready (503 Service Unavailable):**
|
||||
```json
|
||||
{"ready":false,"reason":"shutting_down"}
|
||||
```
|
||||
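A quick way to confirm the status codes documented above is to drive the handlers directly with `net/http/httptest`. A sketch, assuming an `Instance` obtained from the manager as in the earlier examples:

```go
package example

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/bitechdev/ResolveSpec/pkg/server"
)

// checkHealth exercises the health and readiness handlers of an instance.
func checkHealth(instance server.Instance) {
	for path, handler := range map[string]http.HandlerFunc{
		"/health": instance.HealthCheckHandler(),
		"/ready":  instance.ReadinessHandler(),
	} {
		rec := httptest.NewRecorder()
		req := httptest.NewRequest(http.MethodGet, path, nil)
		handler(rec, req)
		// Expect 200 while running, 503 once shutdown has started.
		fmt.Printf("%s -> %d %s\n", path, rec.Code, rec.Body.String())
	}
}
```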
|
||||
## Shutdown Callbacks
|
||||
## Shutdown Behavior
|
||||
|
||||
Register cleanup functions to run during shutdown:
|
||||
When a shutdown signal (SIGINT/SIGTERM) is received:
|
||||
|
||||
```go
|
||||
// Close database
|
||||
server.RegisterShutdownCallback(func(ctx context.Context) error {
|
||||
logger.Info("Closing database connection...")
|
||||
return db.Close()
|
||||
})
|
||||
1. **Mark as shutting down** → New requests get 503
|
||||
2. **Execute callbacks** → Run cleanup functions
|
||||
3. **Drain requests** → Wait up to `DrainTimeout` for in-flight requests
|
||||
4. **Shutdown servers** → Close listeners and connections
|
||||
|
||||
// Flush metrics
|
||||
server.RegisterShutdownCallback(func(ctx context.Context) error {
|
||||
logger.Info("Flushing metrics...")
|
||||
return metricsProvider.Flush(ctx)
|
||||
})
|
||||
```
|
||||
Time Event
|
||||
─────────────────────────────────────────
|
||||
0s Signal received: SIGTERM
|
||||
├─ Mark servers as shutting down
|
||||
├─ Reject new requests (503)
|
||||
└─ Execute shutdown callbacks
|
||||
|
||||
// Close cache
|
||||
server.RegisterShutdownCallback(func(ctx context.Context) error {
|
||||
logger.Info("Closing cache...")
|
||||
return cache.Close()
|
||||
})
|
||||
1s Callbacks complete
|
||||
└─ Start draining requests...
|
||||
|
||||
2s In-flight: 50 requests
|
||||
3s In-flight: 32 requests
|
||||
4s In-flight: 12 requests
|
||||
5s In-flight: 3 requests
|
||||
6s In-flight: 0 requests ✓
|
||||
└─ All requests drained
|
||||
|
||||
6s Shutdown servers
|
||||
7s All servers stopped ✓
|
||||
```
|
||||
|
||||
## Complete Example
|
||||
## Server Management
|
||||
|
||||
### Get Server Instance
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/middleware"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/metrics"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/server"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Initialize metrics
|
||||
metricsProvider := metrics.NewPrometheusProvider()
|
||||
metrics.SetProvider(metricsProvider)
|
||||
|
||||
// Create router
|
||||
router := mux.NewRouter()
|
||||
|
||||
// Apply middleware
|
||||
rateLimiter := middleware.NewRateLimiter(100, 20)
|
||||
sizeLimiter := middleware.NewRequestSizeLimiter(middleware.Size10MB)
|
||||
sanitizer := middleware.DefaultSanitizer()
|
||||
|
||||
router.Use(rateLimiter.Middleware)
|
||||
router.Use(sizeLimiter.Middleware)
|
||||
router.Use(sanitizer.Middleware)
|
||||
router.Use(metricsProvider.Middleware)
|
||||
|
||||
// API routes
|
||||
router.HandleFunc("/api/data", dataHandler)
|
||||
|
||||
// Create graceful server
|
||||
srv := server.NewGracefulServer(server.Config{
|
||||
Addr: ":8080",
|
||||
Handler: router,
|
||||
ShutdownTimeout: 30 * time.Second,
|
||||
DrainTimeout: 25 * time.Second,
|
||||
})
|
||||
|
||||
// Health checks
|
||||
router.HandleFunc("/health", srv.HealthCheckHandler())
|
||||
router.HandleFunc("/ready", srv.ReadinessHandler())
|
||||
|
||||
// Metrics endpoint
|
||||
router.Handle("/metrics", metricsProvider.Handler())
|
||||
|
||||
// Register shutdown callbacks
|
||||
server.RegisterShutdownCallback(func(ctx context.Context) error {
|
||||
log.Println("Cleanup: Flushing metrics...")
|
||||
return nil
|
||||
})
|
||||
|
||||
server.RegisterShutdownCallback(func(ctx context.Context) error {
|
||||
log.Println("Cleanup: Closing database...")
|
||||
// return db.Close()
|
||||
return nil
|
||||
})
|
||||
|
||||
// Start server (blocks until shutdown)
|
||||
log.Printf("Starting server on :8080")
|
||||
if err := srv.ListenAndServe(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Wait for shutdown to complete
|
||||
srv.Wait()
|
||||
log.Println("Server stopped")
|
||||
instance, err := mgr.Get("api-server")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
func dataHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// Your handler logic
|
||||
time.Sleep(100 * time.Millisecond) // Simulate work
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(`{"message":"success"}`))
|
||||
// Check status
|
||||
fmt.Printf("Address: %s\n", instance.Addr())
|
||||
fmt.Printf("Name: %s\n", instance.Name())
|
||||
fmt.Printf("In-flight: %d\n", instance.InFlightRequests())
|
||||
fmt.Printf("Shutting down: %v\n", instance.IsShuttingDown())
|
||||
```
|
||||
|
||||
### List All Servers
|
||||
|
||||
```go
|
||||
instances := mgr.List()
|
||||
for _, instance := range instances {
|
||||
fmt.Printf("Server: %s at %s\n", instance.Name(), instance.Addr())
|
||||
}
|
||||
```
|
||||
|
||||
### Remove Server
|
||||
|
||||
```go
|
||||
// Stop and remove a server
|
||||
if err := mgr.Remove("api-server"); err != nil {
|
||||
log.Printf("Error removing server: %v", err)
|
||||
}
|
||||
```
|
||||
|
||||
### Restart All Servers
|
||||
|
||||
```go
|
||||
// Gracefully restart all servers
|
||||
if err := mgr.RestartAll(); err != nil {
|
||||
log.Printf("Error restarting: %v", err)
|
||||
}
|
||||
```
|
||||
|
||||
@@ -250,23 +331,21 @@ spec:
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
|
||||
# Liveness probe - is app running?
|
||||
# Liveness probe
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
|
||||
# Readiness probe - can app handle traffic?
|
||||
# Readiness probe
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /ready
|
||||
port: 8080
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 3
|
||||
|
||||
# Graceful shutdown
|
||||
lifecycle:
|
||||
@@ -274,26 +353,12 @@ spec:
|
||||
exec:
|
||||
command: ["/bin/sh", "-c", "sleep 5"]
|
||||
|
||||
# Environment
|
||||
env:
|
||||
- name: SHUTDOWN_TIMEOUT
|
||||
value: "30"
|
||||
```
|
||||
|
||||
### Service
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: myapp
|
||||
spec:
|
||||
selector:
|
||||
app: myapp
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
type: LoadBalancer
|
||||
# Allow time for graceful shutdown
|
||||
terminationGracePeriodSeconds: 35
|
||||
```
|
||||
|
||||
## Docker Compose
|
||||
@@ -312,8 +377,70 @@ services:
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 10s
|
||||
stop_grace_period: 35s # Slightly longer than shutdown timeout
|
||||
stop_grace_period: 35s
|
||||
```
|
||||
|
||||
## Complete Example
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/server"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Create server manager
|
||||
mgr := server.NewManager()
|
||||
|
||||
// Register shutdown callbacks
|
||||
mgr.RegisterShutdownCallback(func(ctx context.Context) error {
|
||||
log.Println("Cleanup: Closing database...")
|
||||
// return db.Close()
|
||||
return nil
|
||||
})
|
||||
|
||||
// Create router
|
||||
router := http.NewServeMux()
|
||||
router.HandleFunc("/api/data", dataHandler)
|
||||
|
||||
// Add server
|
||||
instance, err := mgr.Add(server.Config{
|
||||
Name: "api-server",
|
||||
Host: "0.0.0.0",
|
||||
Port: 8080,
|
||||
Handler: router,
|
||||
GZIP: true,
|
||||
ShutdownTimeout: 30 * time.Second,
|
||||
DrainTimeout: 25 * time.Second,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Add health endpoints
|
||||
router.HandleFunc("/health", instance.HealthCheckHandler())
|
||||
router.HandleFunc("/ready", instance.ReadinessHandler())
|
||||
|
||||
// Start and wait for shutdown
|
||||
log.Println("Starting server on :8080")
|
||||
if err := mgr.ServeWithGracefulShutdown(); err != nil {
|
||||
log.Printf("Server stopped: %v", err)
|
||||
}
|
||||
|
||||
log.Println("Server shutdown complete")
|
||||
}
|
||||
|
||||
func dataHandler(w http.ResponseWriter, r *http.Request) {
|
||||
time.Sleep(100 * time.Millisecond) // Simulate work
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(`{"message":"success"}`))
|
||||
}
|
||||
```
|
||||
|
||||
## Testing Graceful Shutdown
|
||||
@@ -330,7 +457,7 @@ SERVER_PID=$!
|
||||
# Wait for server to start
|
||||
sleep 2
|
||||
|
||||
# Send some requests
|
||||
# Send requests
|
||||
for i in {1..10}; do
|
||||
curl http://localhost:8080/api/data &
|
||||
done
|
||||
@@ -341,7 +468,7 @@ sleep 1
|
||||
# Send shutdown signal
|
||||
kill -TERM $SERVER_PID
|
||||
|
||||
# Try to send more requests (should get 503)
|
||||
# Try more requests (should get 503)
|
||||
curl -v http://localhost:8080/api/data
|
||||
|
||||
# Wait for server to stop
|
||||
@@ -349,101 +476,13 @@ wait $SERVER_PID
|
||||
echo "Server stopped gracefully"
|
||||
```
|
||||
|
||||
### Expected Output
|
||||
|
||||
```
|
||||
Starting server on :8080
|
||||
Received signal: terminated, initiating graceful shutdown
|
||||
Starting graceful shutdown...
|
||||
Waiting for 8 in-flight requests to complete...
|
||||
Waiting for 4 in-flight requests to complete...
|
||||
Waiting for 1 in-flight requests to complete...
|
||||
All requests drained in 2.3s
|
||||
Cleanup: Flushing metrics...
|
||||
Cleanup: Closing database...
|
||||
Shutting down HTTP server...
|
||||
Graceful shutdown complete
|
||||
Server stopped
|
||||
```
|
||||
|
||||
## Monitoring In-Flight Requests
|
||||
|
||||
```go
|
||||
// Get current in-flight count
|
||||
count := srv.InFlightRequests()
|
||||
fmt.Printf("In-flight requests: %d\n", count)
|
||||
|
||||
// Check if shutting down
|
||||
if srv.IsShuttingDown() {
|
||||
fmt.Println("Server is shutting down")
|
||||
}
|
||||
```
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Custom Shutdown Logic
|
||||
|
||||
```go
|
||||
// Implement custom shutdown
|
||||
go func() {
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
<-sigChan
|
||||
log.Println("Shutdown signal received")
|
||||
|
||||
// Custom pre-shutdown logic
|
||||
log.Println("Running custom cleanup...")
|
||||
|
||||
// Shutdown with callbacks
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := srv.ShutdownWithCallbacks(ctx); err != nil {
|
||||
log.Printf("Shutdown error: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Start server
|
||||
srv.server.ListenAndServe()
|
||||
```
|
||||
|
||||
### Multiple Servers
|
||||
|
||||
```go
|
||||
// HTTP server
|
||||
httpSrv := server.NewGracefulServer(server.Config{
|
||||
Addr: ":8080",
|
||||
Handler: httpRouter,
|
||||
})
|
||||
|
||||
// HTTPS server
|
||||
httpsSrv := server.NewGracefulServer(server.Config{
|
||||
Addr: ":8443",
|
||||
Handler: httpsRouter,
|
||||
})
|
||||
|
||||
// Start both
|
||||
go httpSrv.ListenAndServe()
|
||||
go httpsSrv.ListenAndServe()
|
||||
|
||||
// Shutdown both on signal
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigChan, os.Interrupt)
|
||||
<-sigChan
|
||||
|
||||
ctx := context.Background()
|
||||
httpSrv.Shutdown(ctx)
|
||||
httpsSrv.Shutdown(ctx)
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Set appropriate timeouts**
|
||||
- `DrainTimeout` < `ShutdownTimeout`
|
||||
- `ShutdownTimeout` < Kubernetes `terminationGracePeriodSeconds` (see the sketch after this list)
|
||||
|
||||
2. **Register cleanup callbacks** for:
|
||||
2. **Use shutdown callbacks** for:
|
||||
- Database connections
|
||||
- Message queues
|
||||
- Metrics flushing
|
||||
@@ -458,7 +497,12 @@ httpsSrv.Shutdown(ctx)
|
||||
- Set `preStop` hook in Kubernetes (5-10s delay)
|
||||
- Allows load balancer to deregister before shutdown
|
||||
|
||||
5. **Monitoring**
|
||||
5. **HTTPS in production**
|
||||
- Use AutoTLS for public-facing services
|
||||
- Use certificate files for enterprise PKI
|
||||
- Use self-signed only for development/testing
|
||||
|
||||
6. **Monitoring**
|
||||
- Track in-flight requests in metrics
|
||||
- Alert on slow drains
|
||||
- Monitor shutdown duration
|
||||
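The timeout ordering from point 1 is easiest to keep correct when the values sit next to each other in configuration. A small sketch (values are illustrative, not library defaults):

```go
package example

import (
	"time"

	"github.com/bitechdev/ResolveSpec/pkg/server"
)

// cfg keeps the recommended ordering visible in one place:
// DrainTimeout < ShutdownTimeout < terminationGracePeriodSeconds (Kubernetes).
var cfg = server.Config{
	Name:            "api-server",
	Port:            8080,
	DrainTimeout:    25 * time.Second, // wait for in-flight requests
	ShutdownTimeout: 30 * time.Second, // hard cap on the whole shutdown
	// terminationGracePeriodSeconds in the Deployment should exceed 30s (e.g. 35).
}
```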
@@ -470,24 +514,63 @@ httpsSrv.Shutdown(ctx)
|
||||
```go
|
||||
// Increase drain timeout
|
||||
config.DrainTimeout = 60 * time.Second
|
||||
config.ShutdownTimeout = 65 * time.Second
|
||||
```
|
||||
|
||||
### Requests Still Timing Out
|
||||
### Requests Timing Out
|
||||
|
||||
```go
|
||||
// Increase write timeout
|
||||
config.WriteTimeout = 30 * time.Second
|
||||
```
|
||||
|
||||
### Force Shutdown Not Working
|
||||
|
||||
The server forces a shutdown after `ShutdownTimeout` even if requests are still in flight. Adjust the timeouts as needed.
|
||||
|
||||
### Debugging Shutdown
|
||||
### Certificate Issues
|
||||
|
||||
```go
|
||||
// Verify certificate files exist and are readable
|
||||
if _, err := os.Stat(config.SSLCert); err != nil {
|
||||
log.Fatalf("Certificate not found: %v", err)
|
||||
}
|
||||
|
||||
// For AutoTLS, ensure:
|
||||
// - Port 443 is accessible
|
||||
// - Domains resolve to server IP
|
||||
// - Cache directory is writable
|
||||
```
|
||||
|
||||
### Debug Logging
|
||||
|
||||
```go
|
||||
// Enable debug logging
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
|
||||
// Enable debug logging
|
||||
logger.SetLevel("debug")
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
### Manager Methods
|
||||
|
||||
- `NewManager()` - Create new server manager
|
||||
- `Add(cfg Config)` - Register server instance
|
||||
- `Get(name string)` - Get server by name
|
||||
- `Remove(name string)` - Stop and remove server
|
||||
- `StartAll()` - Start all registered servers
|
||||
- `StopAll()` - Stop all servers gracefully
|
||||
- `StopAllWithContext(ctx)` - Stop with timeout
|
||||
- `RestartAll()` - Restart all servers
|
||||
- `List()` - Get all server instances
|
||||
- `ServeWithGracefulShutdown()` - Start and block until shutdown
|
||||
- `RegisterShutdownCallback(cb)` - Register cleanup function
|
||||
|
||||
### Instance Methods
|
||||
|
||||
- `Start()` - Start the server
|
||||
- `Stop(ctx)` - Stop gracefully
|
||||
- `Addr()` - Get server address
|
||||
- `Name()` - Get server name
|
||||
- `HealthCheckHandler()` - Get health handler
|
||||
- `ReadinessHandler()` - Get readiness handler
|
||||
- `InFlightRequests()` - Get in-flight count
|
||||
- `IsShuttingDown()` - Check shutdown status
|
||||
- `Wait()` - Block until shutdown complete
|
||||
|
||||
294
pkg/server/example_test.go
Normal file
@@ -0,0 +1,294 @@
|
||||
package server_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/server"
|
||||
)
|
||||
|
||||
// ExampleManager_basic demonstrates basic server manager usage
|
||||
func ExampleManager_basic() {
|
||||
// Create a server manager
|
||||
mgr := server.NewManager()
|
||||
|
||||
// Define a simple handler
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintln(w, "Hello from server!")
|
||||
})
|
||||
|
||||
// Add an HTTP server
|
||||
_, err := mgr.Add(server.Config{
|
||||
Name: "api-server",
|
||||
Host: "localhost",
|
||||
Port: 8080,
|
||||
Handler: handler,
|
||||
GZIP: true, // Enable GZIP compression
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Start all servers
|
||||
if err := mgr.StartAll(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Server is now running...
|
||||
// When done, stop gracefully
|
||||
if err := mgr.StopAll(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// ExampleManager_https demonstrates HTTPS configurations
|
||||
func ExampleManager_https() {
|
||||
mgr := server.NewManager()
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintln(w, "Secure connection!")
|
||||
})
|
||||
|
||||
// Option 1: Use certificate files
|
||||
_, err := mgr.Add(server.Config{
|
||||
Name: "https-server-files",
|
||||
Host: "localhost",
|
||||
Port: 8443,
|
||||
Handler: handler,
|
||||
SSLCert: "/path/to/cert.pem",
|
||||
SSLKey: "/path/to/key.pem",
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Option 2: Self-signed certificate (for development)
|
||||
_, err = mgr.Add(server.Config{
|
||||
Name: "https-server-self-signed",
|
||||
Host: "localhost",
|
||||
Port: 8444,
|
||||
Handler: handler,
|
||||
SelfSignedSSL: true,
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Option 3: Let's Encrypt / AutoTLS (for production)
|
||||
_, err = mgr.Add(server.Config{
|
||||
Name: "https-server-letsencrypt",
|
||||
Host: "0.0.0.0",
|
||||
Port: 443,
|
||||
Handler: handler,
|
||||
AutoTLS: true,
|
||||
AutoTLSDomains: []string{"example.com", "www.example.com"},
|
||||
AutoTLSEmail: "admin@example.com",
|
||||
AutoTLSCacheDir: "./certs-cache",
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Start all servers
|
||||
if err := mgr.StartAll(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
mgr.StopAll()
|
||||
}
|
||||
|
||||
// ExampleManager_gracefulShutdown demonstrates graceful shutdown with callbacks
|
||||
func ExampleManager_gracefulShutdown() {
|
||||
mgr := server.NewManager()
|
||||
|
||||
// Register shutdown callbacks for cleanup tasks
|
||||
mgr.RegisterShutdownCallback(func(ctx context.Context) error {
|
||||
fmt.Println("Closing database connections...")
|
||||
// Close your database here
|
||||
return nil
|
||||
})
|
||||
|
||||
mgr.RegisterShutdownCallback(func(ctx context.Context) error {
|
||||
fmt.Println("Flushing metrics...")
|
||||
// Flush metrics here
|
||||
return nil
|
||||
})
|
||||
|
||||
// Add server with custom timeouts
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Simulate some work
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
fmt.Fprintln(w, "Done!")
|
||||
})
|
||||
|
||||
_, err := mgr.Add(server.Config{
|
||||
Name: "api-server",
|
||||
Host: "localhost",
|
||||
Port: 8080,
|
||||
Handler: handler,
|
||||
ShutdownTimeout: 30 * time.Second, // Max time for shutdown
|
||||
DrainTimeout: 25 * time.Second, // Time to wait for in-flight requests
|
||||
ReadTimeout: 10 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
IdleTimeout: 120 * time.Second,
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Start servers and block until shutdown signal (SIGINT/SIGTERM)
|
||||
// This will automatically handle graceful shutdown with callbacks
|
||||
if err := mgr.ServeWithGracefulShutdown(); err != nil {
|
||||
fmt.Printf("Shutdown completed: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// ExampleManager_healthChecks demonstrates health and readiness endpoints
|
||||
func ExampleManager_healthChecks() {
|
||||
mgr := server.NewManager()
|
||||
|
||||
// Create a router with health endpoints
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/api/data", func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintln(w, "Data endpoint")
|
||||
})
|
||||
|
||||
// Add server
|
||||
instance, err := mgr.Add(server.Config{
|
||||
Name: "api-server",
|
||||
Host: "localhost",
|
||||
Port: 8080,
|
||||
Handler: mux,
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Add health and readiness endpoints
|
||||
mux.HandleFunc("/health", instance.HealthCheckHandler())
|
||||
mux.HandleFunc("/ready", instance.ReadinessHandler())
|
||||
|
||||
// Start the server
|
||||
if err := mgr.StartAll(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Health check returns:
|
||||
// - 200 OK with {"status":"healthy"} when healthy
|
||||
// - 503 Service Unavailable with {"status":"shutting_down"} when shutting down
|
||||
|
||||
// Readiness check returns:
|
||||
// - 200 OK with {"ready":true,"in_flight_requests":N} when ready
|
||||
// - 503 Service Unavailable with {"ready":false,"reason":"shutting_down"} when shutting down
|
||||
|
||||
// Cleanup
|
||||
mgr.StopAll()
|
||||
}
|
||||
|
||||
// ExampleManager_multipleServers demonstrates running multiple servers
|
||||
func ExampleManager_multipleServers() {
|
||||
mgr := server.NewManager()
|
||||
|
||||
// Public API server
|
||||
publicHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintln(w, "Public API")
|
||||
})
|
||||
_, err := mgr.Add(server.Config{
|
||||
Name: "public-api",
|
||||
Host: "0.0.0.0",
|
||||
Port: 8080,
|
||||
Handler: publicHandler,
|
||||
GZIP: true,
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Admin API server (different port)
|
||||
adminHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintln(w, "Admin API")
|
||||
})
|
||||
_, err = mgr.Add(server.Config{
|
||||
Name: "admin-api",
|
||||
Host: "localhost",
|
||||
Port: 8081,
|
||||
Handler: adminHandler,
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Metrics server (internal only)
|
||||
metricsHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintln(w, "Metrics data")
|
||||
})
|
||||
_, err = mgr.Add(server.Config{
|
||||
Name: "metrics",
|
||||
Host: "127.0.0.1",
|
||||
Port: 9090,
|
||||
Handler: metricsHandler,
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Start all servers at once
|
||||
if err := mgr.StartAll(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Get specific server instance
|
||||
publicInstance, err := mgr.Get("public-api")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Printf("Public API running on: %s\n", publicInstance.Addr())
|
||||
|
||||
// List all servers
|
||||
instances := mgr.List()
|
||||
fmt.Printf("Running %d servers\n", len(instances))
|
||||
|
||||
// Stop all servers gracefully (in parallel)
|
||||
if err := mgr.StopAll(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// ExampleManager_monitoring demonstrates monitoring server state
|
||||
func ExampleManager_monitoring() {
|
||||
mgr := server.NewManager()
|
||||
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
time.Sleep(50 * time.Millisecond) // Simulate work
|
||||
fmt.Fprintln(w, "Done")
|
||||
})
|
||||
|
||||
instance, err := mgr.Add(server.Config{
|
||||
Name: "api-server",
|
||||
Host: "localhost",
|
||||
Port: 8080,
|
||||
Handler: handler,
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err := mgr.StartAll(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Check server status
|
||||
fmt.Printf("Server address: %s\n", instance.Addr())
|
||||
fmt.Printf("Server name: %s\n", instance.Name())
|
||||
fmt.Printf("Is shutting down: %v\n", instance.IsShuttingDown())
|
||||
fmt.Printf("In-flight requests: %d\n", instance.InFlightRequests())
|
||||
|
||||
// Cleanup
|
||||
mgr.StopAll()
|
||||
|
||||
// Wait for complete shutdown
|
||||
instance.Wait()
|
||||
}
|
||||
137
pkg/server/interfaces.go
Normal file
@@ -0,0 +1,137 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Config holds the configuration for a single web server instance.
|
||||
type Config struct {
|
||||
Name string
|
||||
Host string
|
||||
Port int
|
||||
Description string
|
||||
|
||||
// Handler is the http.Handler (e.g., a router) to be served.
|
||||
Handler http.Handler
|
||||
|
||||
// GZIP compression support
|
||||
GZIP bool
|
||||
|
||||
// TLS/HTTPS configuration options (mutually exclusive)
|
||||
// Option 1: Provide certificate and key files directly
|
||||
SSLCert string
|
||||
SSLKey string
|
||||
|
||||
// Option 2: Use self-signed certificate (for development/testing)
|
||||
// Generates a self-signed certificate automatically when SSLCert/SSLKey are not provided
|
||||
SelfSignedSSL bool
|
||||
|
||||
// Option 3: Use Let's Encrypt / Certbot for automatic TLS
|
||||
// AutoTLS enables automatic certificate management via Let's Encrypt
|
||||
AutoTLS bool
|
||||
// AutoTLSDomains specifies the domains for Let's Encrypt certificates
|
||||
AutoTLSDomains []string
|
||||
// AutoTLSCacheDir specifies where to cache certificates (default: "./certs-cache")
|
||||
AutoTLSCacheDir string
|
||||
// AutoTLSEmail is the email for Let's Encrypt registration (optional but recommended)
|
||||
AutoTLSEmail string
|
||||
|
||||
// Graceful shutdown configuration
|
||||
// ShutdownTimeout is the maximum time to wait for graceful shutdown
|
||||
// Default: 30 seconds
|
||||
ShutdownTimeout time.Duration
|
||||
|
||||
// DrainTimeout is the time to wait for in-flight requests to complete
|
||||
// before forcing shutdown. Default: 25 seconds
|
||||
DrainTimeout time.Duration
|
||||
|
||||
// ReadTimeout is the maximum duration for reading the entire request
|
||||
// Default: 15 seconds
|
||||
ReadTimeout time.Duration
|
||||
|
||||
// WriteTimeout is the maximum duration before timing out writes of the response
|
||||
// Default: 15 seconds
|
||||
WriteTimeout time.Duration
|
||||
|
||||
// IdleTimeout is the maximum amount of time to wait for the next request
|
||||
// Default: 60 seconds
|
||||
IdleTimeout time.Duration
|
||||
}
|
||||
|
||||
// Instance defines the interface for a single server instance.
|
||||
// It abstracts the underlying http.Server, allowing for easier management and testing.
|
||||
type Instance interface {
|
||||
// Start begins serving requests. This method should be non-blocking and
|
||||
// run the server in a separate goroutine.
|
||||
Start() error
|
||||
|
||||
// Stop gracefully shuts down the server without interrupting any active connections.
|
||||
// It accepts a context to allow for a timeout.
|
||||
Stop(ctx context.Context) error
|
||||
|
||||
// Addr returns the network address the server is listening on.
|
||||
Addr() string
|
||||
|
||||
// Name returns the server instance name.
|
||||
Name() string
|
||||
|
||||
// HealthCheckHandler returns a handler that responds to health checks.
|
||||
// Returns 200 OK when healthy, 503 Service Unavailable when shutting down.
|
||||
HealthCheckHandler() http.HandlerFunc
|
||||
|
||||
// ReadinessHandler returns a handler for readiness checks.
|
||||
// Includes in-flight request count.
|
||||
ReadinessHandler() http.HandlerFunc
|
||||
|
||||
// InFlightRequests returns the current number of in-flight requests.
|
||||
InFlightRequests() int64
|
||||
|
||||
// IsShuttingDown returns true if the server is shutting down.
|
||||
IsShuttingDown() bool
|
||||
|
||||
// Wait blocks until shutdown is complete.
|
||||
Wait()
|
||||
}
|
||||
|
||||
// Manager defines the interface for a server manager.
|
||||
// It is responsible for managing the lifecycle of multiple server instances.
|
||||
type Manager interface {
|
||||
// Add registers a new server instance based on the provided configuration.
|
||||
// The server is not started until StartAll or Start is called on the instance.
|
||||
Add(cfg Config) (Instance, error)
|
||||
|
||||
// Get returns a server instance by its name.
|
||||
Get(name string) (Instance, error)
|
||||
|
||||
// Remove stops and removes a server instance by its name.
|
||||
Remove(name string) error
|
||||
|
||||
// StartAll starts all registered server instances that are not already running.
|
||||
StartAll() error
|
||||
|
||||
// StopAll gracefully shuts down all running server instances.
|
||||
// Executes shutdown callbacks and drains in-flight requests.
|
||||
StopAll() error
|
||||
|
||||
// StopAllWithContext gracefully shuts down all running server instances with a context.
|
||||
StopAllWithContext(ctx context.Context) error
|
||||
|
||||
// RestartAll gracefully restarts all running server instances.
|
||||
RestartAll() error
|
||||
|
||||
// List returns all registered server instances.
|
||||
List() []Instance
|
||||
|
||||
// ServeWithGracefulShutdown starts all servers and blocks until a shutdown signal is received.
|
||||
// It handles SIGINT and SIGTERM signals and performs graceful shutdown with callbacks.
|
||||
ServeWithGracefulShutdown() error
|
||||
|
||||
// RegisterShutdownCallback registers a callback to be called during shutdown.
|
||||
// Useful for cleanup tasks like closing database connections, flushing metrics, etc.
|
||||
RegisterShutdownCallback(cb ShutdownCallback)
|
||||
}
|
||||
|
||||
// ShutdownCallback is a function called during graceful shutdown.
|
||||
type ShutdownCallback func(context.Context) error
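Because Instance is an interface, code that only needs names, handlers, or shutdown state can be unit-tested against a stub instead of a real listener. A minimal sketch follows; the `fakeInstance` type and its behaviour are invented for illustration and are not part of the package:

```go
package server_test

import (
	"context"
	"net/http"

	"github.com/bitechdev/ResolveSpec/pkg/server"
)

// fakeInstance is a stub implementation of server.Instance for tests of
// code that depends only on the interface.
type fakeInstance struct {
	name     string
	stopping bool
}

func (f *fakeInstance) Start() error                   { return nil }
func (f *fakeInstance) Stop(ctx context.Context) error { f.stopping = true; return nil }
func (f *fakeInstance) Addr() string                   { return "127.0.0.1:0" }
func (f *fakeInstance) Name() string                   { return f.name }
func (f *fakeInstance) InFlightRequests() int64        { return 0 }
func (f *fakeInstance) IsShuttingDown() bool           { return f.stopping }
func (f *fakeInstance) Wait()                          {}

func (f *fakeInstance) HealthCheckHandler() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }
}

func (f *fakeInstance) ReadinessHandler() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }
}

// Compile-time check that the stub satisfies the interface.
var _ server.Instance = (*fakeInstance)(nil)
```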
572
pkg/server/manager.go
Normal file
572
pkg/server/manager.go
Normal file
@@ -0,0 +1,572 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/middleware"
|
||||
"github.com/klauspost/compress/gzhttp"
|
||||
)
|
||||
|
||||
// gracefulServer wraps http.Server with graceful shutdown capabilities (internal type)
|
||||
type gracefulServer struct {
|
||||
server *http.Server
|
||||
shutdownTimeout time.Duration
|
||||
drainTimeout time.Duration
|
||||
inFlightRequests atomic.Int64
|
||||
isShuttingDown atomic.Bool
|
||||
shutdownOnce sync.Once
|
||||
shutdownComplete chan struct{}
|
||||
}
|
||||
|
||||
// trackRequestsMiddleware tracks in-flight requests and blocks new requests during shutdown
|
||||
func (gs *gracefulServer) trackRequestsMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check if shutting down
|
||||
if gs.isShuttingDown.Load() {
|
||||
http.Error(w, `{"error":"service_unavailable","message":"Server is shutting down"}`, http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
// Increment in-flight counter
|
||||
gs.inFlightRequests.Add(1)
|
||||
defer gs.inFlightRequests.Add(-1)
|
||||
|
||||
// Serve the request
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// shutdown performs graceful shutdown with request draining
|
||||
func (gs *gracefulServer) shutdown(ctx context.Context) error {
|
||||
var shutdownErr error
|
||||
|
||||
gs.shutdownOnce.Do(func() {
|
||||
logger.Info("Starting graceful shutdown...")
|
||||
|
||||
// Mark as shutting down (new requests will be rejected)
|
||||
gs.isShuttingDown.Store(true)
|
||||
|
||||
// Create context with timeout
|
||||
shutdownCtx, cancel := context.WithTimeout(ctx, gs.shutdownTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Wait for in-flight requests to complete (with drain timeout)
|
||||
drainCtx, drainCancel := context.WithTimeout(shutdownCtx, gs.drainTimeout)
|
||||
defer drainCancel()
|
||||
|
||||
shutdownErr = gs.drainRequests(drainCtx)
|
||||
if shutdownErr != nil {
|
||||
logger.Error("Error draining requests: %v", shutdownErr)
|
||||
}
|
||||
|
||||
// Shutdown the server
|
||||
logger.Info("Shutting down HTTP server...")
|
||||
if err := gs.server.Shutdown(shutdownCtx); err != nil {
|
||||
logger.Error("Error shutting down server: %v", err)
|
||||
if shutdownErr == nil {
|
||||
shutdownErr = err
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("Graceful shutdown complete")
|
||||
close(gs.shutdownComplete)
|
||||
})
|
||||
|
||||
return shutdownErr
|
||||
}
|
||||
|
||||
// drainRequests waits for in-flight requests to complete
|
||||
func (gs *gracefulServer) drainRequests(ctx context.Context) error {
|
||||
ticker := time.NewTicker(100 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
for {
|
||||
inFlight := gs.inFlightRequests.Load()
|
||||
|
||||
if inFlight == 0 {
|
||||
logger.Info("All requests drained in %v", time.Since(startTime))
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.Warn("Drain timeout exceeded with %d requests still in flight", inFlight)
|
||||
return fmt.Errorf("drain timeout exceeded: %d requests still in flight", inFlight)
|
||||
case <-ticker.C:
|
||||
logger.Debug("Waiting for %d in-flight requests to complete...", inFlight)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// inFlightRequestsCount returns the current number of in-flight requests
|
||||
func (gs *gracefulServer) inFlightRequestsCount() int64 {
|
||||
return gs.inFlightRequests.Load()
|
||||
}
|
||||
|
||||
// isShutdown returns true if the server is shutting down
|
||||
func (gs *gracefulServer) isShutdown() bool {
|
||||
return gs.isShuttingDown.Load()
|
||||
}
|
||||
|
||||
// wait blocks until shutdown is complete
|
||||
func (gs *gracefulServer) wait() {
|
||||
<-gs.shutdownComplete
|
||||
}
|
||||
|
||||
// healthCheckHandler returns a handler that responds to health checks
|
||||
func (gs *gracefulServer) healthCheckHandler() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if gs.isShutdown() {
|
||||
http.Error(w, `{"status":"shutting_down"}`, http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, err := w.Write([]byte(`{"status":"healthy"}`))
|
||||
if err != nil {
|
||||
logger.Warn("Failed to write health check response: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// readinessHandler returns a handler for readiness checks
|
||||
func (gs *gracefulServer) readinessHandler() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if gs.isShutdown() {
|
||||
http.Error(w, `{"ready":false,"reason":"shutting_down"}`, http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
inFlight := gs.inFlightRequestsCount()
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintf(w, `{"ready":true,"in_flight_requests":%d}`, inFlight)
|
||||
}
|
||||
}
|
||||
|
||||
// serverManager manages a collection of server instances with graceful shutdown support.
|
||||
type serverManager struct {
|
||||
instances map[string]Instance
|
||||
mu sync.RWMutex
|
||||
shutdownCallbacks []ShutdownCallback
|
||||
callbacksMu sync.Mutex
|
||||
}
|
||||
|
||||
// NewManager creates a new server manager.
|
||||
func NewManager() Manager {
|
||||
return &serverManager{
|
||||
instances: make(map[string]Instance),
|
||||
shutdownCallbacks: make([]ShutdownCallback, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Add registers a new server instance.
|
||||
func (sm *serverManager) Add(cfg Config) (Instance, error) {
|
||||
sm.mu.Lock()
|
||||
defer sm.mu.Unlock()
|
||||
|
||||
if cfg.Name == "" {
|
||||
return nil, fmt.Errorf("server name cannot be empty")
|
||||
}
|
||||
if _, exists := sm.instances[cfg.Name]; exists {
|
||||
return nil, fmt.Errorf("server with name '%s' already exists", cfg.Name)
|
||||
}
|
||||
|
||||
instance, err := newInstance(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sm.instances[cfg.Name] = instance
|
||||
return instance, nil
|
||||
}
|
||||
|
||||
// Get returns a server instance by its name.
|
||||
func (sm *serverManager) Get(name string) (Instance, error) {
|
||||
sm.mu.RLock()
|
||||
defer sm.mu.RUnlock()
|
||||
|
||||
instance, exists := sm.instances[name]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("server with name '%s' not found", name)
|
||||
}
|
||||
return instance, nil
|
||||
}
|
||||
|
||||
// Remove stops and removes a server instance by its name.
|
||||
func (sm *serverManager) Remove(name string) error {
|
||||
sm.mu.Lock()
|
||||
defer sm.mu.Unlock()
|
||||
|
||||
instance, exists := sm.instances[name]
|
||||
if !exists {
|
||||
return fmt.Errorf("server with name '%s' not found", name)
|
||||
}
|
||||
|
||||
// Stop the server if it's running
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
if err := instance.Stop(ctx); err != nil {
|
||||
logger.Warn("Failed to gracefully stop server '%s' on remove: %v", name, err)
|
||||
}
|
||||
|
||||
delete(sm.instances, name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartAll starts all registered server instances.
|
||||
func (sm *serverManager) StartAll() error {
|
||||
sm.mu.RLock()
|
||||
defer sm.mu.RUnlock()
|
||||
|
||||
var startErrors []error
|
||||
for name, instance := range sm.instances {
|
||||
if err := instance.Start(); err != nil {
|
||||
startErrors = append(startErrors, fmt.Errorf("failed to start server '%s': %w", name, err))
|
||||
}
|
||||
}
|
||||
|
||||
if len(startErrors) > 0 {
|
||||
return fmt.Errorf("encountered errors while starting servers: %v", startErrors)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StopAll gracefully shuts down all running server instances.
|
||||
func (sm *serverManager) StopAll() error {
|
||||
return sm.StopAllWithContext(context.Background())
|
||||
}
|
||||
|
||||
// StopAllWithContext gracefully shuts down all running server instances with a context.
|
||||
func (sm *serverManager) StopAllWithContext(ctx context.Context) error {
|
||||
sm.mu.RLock()
|
||||
instancesToStop := make([]Instance, 0, len(sm.instances))
|
||||
for _, instance := range sm.instances {
|
||||
instancesToStop = append(instancesToStop, instance)
|
||||
}
|
||||
sm.mu.RUnlock()
|
||||
|
||||
logger.Info("Shutting down all servers...")
|
||||
|
||||
// Execute shutdown callbacks first
|
||||
sm.callbacksMu.Lock()
|
||||
callbacks := make([]ShutdownCallback, len(sm.shutdownCallbacks))
|
||||
copy(callbacks, sm.shutdownCallbacks)
|
||||
sm.callbacksMu.Unlock()
|
||||
|
||||
if len(callbacks) > 0 {
|
||||
logger.Info("Executing %d shutdown callbacks...", len(callbacks))
|
||||
for i, cb := range callbacks {
|
||||
if err := cb(ctx); err != nil {
|
||||
logger.Error("Shutdown callback %d failed: %v", i+1, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop all instances in parallel
|
||||
var shutdownErrors []error
|
||||
var wg sync.WaitGroup
|
||||
var errorsMu sync.Mutex
|
||||
|
||||
for _, instance := range instancesToStop {
|
||||
wg.Add(1)
|
||||
go func(inst Instance) {
|
||||
defer wg.Done()
|
||||
shutdownCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
defer cancel()
|
||||
if err := inst.Stop(shutdownCtx); err != nil {
|
||||
errorsMu.Lock()
|
||||
shutdownErrors = append(shutdownErrors, fmt.Errorf("failed to stop server '%s': %w", inst.Name(), err))
|
||||
errorsMu.Unlock()
|
||||
}
|
||||
}(instance)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if len(shutdownErrors) > 0 {
|
||||
return fmt.Errorf("encountered errors while stopping servers: %v", shutdownErrors)
|
||||
}
|
||||
logger.Info("All servers stopped gracefully.")
|
||||
return nil
|
||||
}
|
||||
|
||||
// RestartAll gracefully restarts all running server instances.
|
||||
func (sm *serverManager) RestartAll() error {
|
||||
logger.Info("Restarting all servers...")
|
||||
if err := sm.StopAll(); err != nil {
|
||||
return fmt.Errorf("failed to stop servers during restart: %w", err)
|
||||
}
|
||||
|
||||
// Give ports time to be released
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
|
||||
if err := sm.StartAll(); err != nil {
|
||||
return fmt.Errorf("failed to start servers during restart: %w", err)
|
||||
}
|
||||
logger.Info("All servers restarted successfully.")
|
||||
return nil
|
||||
}
|
||||
|
||||
// List returns all registered server instances.
|
||||
func (sm *serverManager) List() []Instance {
|
||||
sm.mu.RLock()
|
||||
defer sm.mu.RUnlock()
|
||||
|
||||
instances := make([]Instance, 0, len(sm.instances))
|
||||
for _, instance := range sm.instances {
|
||||
instances = append(instances, instance)
|
||||
}
|
||||
return instances
|
||||
}
|
||||
|
||||
// RegisterShutdownCallback registers a callback to be called during shutdown.
|
||||
func (sm *serverManager) RegisterShutdownCallback(cb ShutdownCallback) {
|
||||
sm.callbacksMu.Lock()
|
||||
defer sm.callbacksMu.Unlock()
|
||||
sm.shutdownCallbacks = append(sm.shutdownCallbacks, cb)
|
||||
}
|
||||
|
||||
// ServeWithGracefulShutdown starts all servers and blocks until a shutdown signal is received.
|
||||
func (sm *serverManager) ServeWithGracefulShutdown() error {
|
||||
// Start all servers
|
||||
if err := sm.StartAll(); err != nil {
|
||||
return fmt.Errorf("failed to start servers: %w", err)
|
||||
}
|
||||
|
||||
logger.Info("All servers started. Waiting for shutdown signal...")
|
||||
|
||||
// Wait for interrupt signal
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGINT)
|
||||
|
||||
sig := <-sigChan
|
||||
logger.Info("Received signal: %v, initiating graceful shutdown", sig)
|
||||
|
||||
// Create context with timeout for shutdown
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
return sm.StopAllWithContext(ctx)
|
||||
}
|
||||
|
||||
// serverInstance is a concrete implementation of the Instance interface.
|
||||
// It wraps gracefulServer to provide graceful shutdown capabilities.
|
||||
type serverInstance struct {
|
||||
cfg Config
|
||||
gracefulServer *gracefulServer
|
||||
certFile string // Path to certificate file (may be temporary for self-signed)
|
||||
keyFile string // Path to key file (may be temporary for self-signed)
|
||||
mu sync.RWMutex
|
||||
running bool
|
||||
serverErr chan error
|
||||
}
|
||||
|
||||
// newInstance creates a new, unstarted server instance from a config.
|
||||
func newInstance(cfg Config) (*serverInstance, error) {
|
||||
if cfg.Handler == nil {
|
||||
return nil, fmt.Errorf("handler cannot be nil")
|
||||
}
|
||||
|
||||
// Set default timeouts
|
||||
if cfg.ShutdownTimeout == 0 {
|
||||
cfg.ShutdownTimeout = 30 * time.Second
|
||||
}
|
||||
if cfg.DrainTimeout == 0 {
|
||||
cfg.DrainTimeout = 25 * time.Second
|
||||
}
|
||||
if cfg.ReadTimeout == 0 {
|
||||
cfg.ReadTimeout = 15 * time.Second
|
||||
}
|
||||
if cfg.WriteTimeout == 0 {
|
||||
cfg.WriteTimeout = 15 * time.Second
|
||||
}
|
||||
if cfg.IdleTimeout == 0 {
|
||||
cfg.IdleTimeout = 60 * time.Second
|
||||
}
|
||||
|
||||
addr := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port)
|
||||
var handler http.Handler = cfg.Handler
|
||||
|
||||
// Wrap with GZIP handler if enabled
|
||||
if cfg.GZIP {
|
||||
gz, err := gzhttp.NewWrapper()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create GZIP wrapper: %w", err)
|
||||
}
|
||||
handler = gz(handler)
|
||||
}
|
||||
|
||||
// Wrap with the panic recovery middleware
|
||||
handler = middleware.PanicRecovery(handler)
|
||||
|
||||
// Configure TLS if any TLS option is enabled
|
||||
tlsConfig, certFile, keyFile, err := configureTLS(cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure TLS: %w", err)
|
||||
}
|
||||
|
||||
// Create gracefulServer
|
||||
gracefulSrv := &gracefulServer{
|
||||
server: &http.Server{
|
||||
Addr: addr,
|
||||
Handler: handler,
|
||||
ReadTimeout: cfg.ReadTimeout,
|
||||
WriteTimeout: cfg.WriteTimeout,
|
||||
IdleTimeout: cfg.IdleTimeout,
|
||||
TLSConfig: tlsConfig,
|
||||
},
|
||||
shutdownTimeout: cfg.ShutdownTimeout,
|
||||
drainTimeout: cfg.DrainTimeout,
|
||||
shutdownComplete: make(chan struct{}),
|
||||
}
|
||||
|
||||
return &serverInstance{
|
||||
cfg: cfg,
|
||||
gracefulServer: gracefulSrv,
|
||||
certFile: certFile,
|
||||
keyFile: keyFile,
|
||||
serverErr: make(chan error, 1),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start begins serving requests in a new goroutine.
|
||||
func (s *serverInstance) Start() error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if s.running {
|
||||
return fmt.Errorf("server '%s' is already running", s.cfg.Name)
|
||||
}
|
||||
|
||||
// Determine if we're using TLS
|
||||
useTLS := s.cfg.SSLCert != "" || s.cfg.SSLKey != "" || s.cfg.SelfSignedSSL || s.cfg.AutoTLS
|
||||
|
||||
// Wrap handler with request tracking
|
||||
s.gracefulServer.server.Handler = s.gracefulServer.trackRequestsMiddleware(s.gracefulServer.server.Handler)
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
s.mu.Lock()
|
||||
s.running = false
|
||||
s.mu.Unlock()
|
||||
logger.Info("Server '%s' stopped.", s.cfg.Name)
|
||||
}()
|
||||
|
||||
var err error
|
||||
protocol := "HTTP"
|
||||
|
||||
if useTLS {
|
||||
protocol = "HTTPS"
|
||||
logger.Info("Starting %s server '%s' on %s", protocol, s.cfg.Name, s.Addr())
|
||||
|
||||
// For AutoTLS, we need to use a TLS listener
|
||||
if s.cfg.AutoTLS {
|
||||
// Create listener
|
||||
ln, lnErr := net.Listen("tcp", s.gracefulServer.server.Addr)
|
||||
if lnErr != nil {
|
||||
err = fmt.Errorf("failed to create listener: %w", lnErr)
|
||||
} else {
|
||||
// Wrap with TLS
|
||||
tlsListener := tls.NewListener(ln, s.gracefulServer.server.TLSConfig)
|
||||
err = s.gracefulServer.server.Serve(tlsListener)
|
||||
}
|
||||
} else {
|
||||
// Use certificate files (regular SSL or self-signed)
|
||||
err = s.gracefulServer.server.ListenAndServeTLS(s.certFile, s.keyFile)
|
||||
}
|
||||
} else {
|
||||
logger.Info("Starting %s server '%s' on %s", protocol, s.cfg.Name, s.Addr())
|
||||
err = s.gracefulServer.server.ListenAndServe()
|
||||
}
|
||||
|
||||
// If the server stopped for a reason other than a graceful shutdown, log and report the error.
|
||||
if err != nil && err != http.ErrServerClosed {
|
||||
logger.Error("Server '%s' failed: %v", s.cfg.Name, err)
|
||||
select {
|
||||
case s.serverErr <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
s.running = true
|
||||
// A small delay to allow the goroutine to start and potentially fail on binding.
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Check if the server failed to start
|
||||
select {
|
||||
case err := <-s.serverErr:
|
||||
s.running = false
|
||||
return err
|
||||
default:
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop gracefully shuts down the server.
|
||||
func (s *serverInstance) Stop(ctx context.Context) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if !s.running {
|
||||
return nil // Already stopped
|
||||
}
|
||||
|
||||
logger.Info("Gracefully shutting down server '%s'...", s.cfg.Name)
|
||||
err := s.gracefulServer.shutdown(ctx)
|
||||
if err == nil {
|
||||
s.running = false
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Addr returns the network address the server is listening on.
|
||||
func (s *serverInstance) Addr() string {
|
||||
return s.gracefulServer.server.Addr
|
||||
}
|
||||
|
||||
// Name returns the server instance name.
|
||||
func (s *serverInstance) Name() string {
|
||||
return s.cfg.Name
|
||||
}
|
||||
|
||||
// HealthCheckHandler returns a handler that responds to health checks.
|
||||
func (s *serverInstance) HealthCheckHandler() http.HandlerFunc {
|
||||
return s.gracefulServer.healthCheckHandler()
|
||||
}
|
||||
|
||||
// ReadinessHandler returns a handler for readiness checks.
|
||||
func (s *serverInstance) ReadinessHandler() http.HandlerFunc {
|
||||
return s.gracefulServer.readinessHandler()
|
||||
}
|
||||
|
||||
// InFlightRequests returns the current number of in-flight requests.
|
||||
func (s *serverInstance) InFlightRequests() int64 {
|
||||
return s.gracefulServer.inFlightRequestsCount()
|
||||
}
|
||||
|
||||
// IsShuttingDown returns true if the server is shutting down.
|
||||
func (s *serverInstance) IsShuttingDown() bool {
|
||||
return s.gracefulServer.isShutdown()
|
||||
}
|
||||
|
||||
// Wait blocks until shutdown is complete.
|
||||
func (s *serverInstance) Wait() {
|
||||
s.gracefulServer.wait()
|
||||
}
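Putting the pieces together, a typical entry point registers servers and cleanup hooks, then hands control to ServeWithGracefulShutdown. The sketch below is illustrative only; the mux, port, and the logging done in the callback are assumptions, not part of the package:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/bitechdev/ResolveSpec/pkg/server"
)

func main() {
	mgr := server.NewManager()

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})

	if _, err := mgr.Add(server.Config{
		Name:    "api",
		Host:    "0.0.0.0",
		Port:    8080,
		Handler: mux,
		// Timeouts left at zero fall back to the defaults applied in
		// newInstance (30s shutdown, 25s drain, 15s read/write, 60s idle).
	}); err != nil {
		log.Fatal(err)
	}

	// Hypothetical cleanup hook: close pools, flush metrics, etc.
	mgr.RegisterShutdownCallback(func(ctx context.Context) error {
		log.Println("running shutdown cleanup")
		return nil
	})

	// Starts all registered servers, blocks until SIGINT/SIGTERM, then
	// drains in-flight requests and stops everything.
	if err := mgr.ServeWithGracefulShutdown(); err != nil {
		log.Fatal(err)
	}
}
```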
328
pkg/server/manager_test.go
Normal file
328
pkg/server/manager_test.go
Normal file
@@ -0,0 +1,328 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// getFreePort asks the kernel for a free open port that is ready to use.
|
||||
func getFreePort(t *testing.T) int {
|
||||
t.Helper()
|
||||
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
l, err := net.ListenTCP("tcp", addr)
|
||||
require.NoError(t, err)
|
||||
defer l.Close()
|
||||
return l.Addr().(*net.TCPAddr).Port
|
||||
}
|
||||
|
||||
func TestServerManagerLifecycle(t *testing.T) {
|
||||
// Initialize logger for test output
|
||||
logger.Init(true)
|
||||
|
||||
// Create a new server manager
|
||||
sm := NewManager()
|
||||
|
||||
// Define a simple test handler
|
||||
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte("Hello, World!"))
|
||||
})
|
||||
|
||||
// Get a free port for the server to listen on to avoid conflicts
|
||||
testPort := getFreePort(t)
|
||||
|
||||
// Add a new server configuration
|
||||
serverConfig := Config{
|
||||
Name: "TestServer",
|
||||
Host: "localhost",
|
||||
Port: testPort,
|
||||
Handler: testHandler,
|
||||
}
|
||||
instance, err := sm.Add(serverConfig)
|
||||
require.NoError(t, err, "should be able to add a new server")
|
||||
require.NotNil(t, instance, "added instance should not be nil")
|
||||
|
||||
// --- Test StartAll ---
|
||||
err = sm.StartAll()
|
||||
require.NoError(t, err, "StartAll should not return an error")
|
||||
|
||||
// Give the server a moment to start up
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// --- Verify Server is Running ---
|
||||
client := &http.Client{Timeout: 2 * time.Second}
|
||||
url := fmt.Sprintf("http://localhost:%d", testPort)
|
||||
resp, err := client.Get(url)
|
||||
require.NoError(t, err, "should be able to make a request to the running server")
|
||||
|
||||
assert.Equal(t, http.StatusOK, resp.StatusCode, "expected status OK from the test server")
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
resp.Body.Close()
|
||||
assert.Equal(t, "Hello, World!", string(body), "response body should match expected value")
|
||||
|
||||
// --- Test Get ---
|
||||
retrievedInstance, err := sm.Get("TestServer")
|
||||
require.NoError(t, err, "should be able to get server by name")
|
||||
assert.Equal(t, instance.Addr(), retrievedInstance.Addr(), "retrieved instance should be the same")
|
||||
|
||||
// --- Test List ---
|
||||
instanceList := sm.List()
|
||||
require.Len(t, instanceList, 1, "list should contain one instance")
|
||||
assert.Equal(t, instance.Addr(), instanceList[0].Addr(), "listed instance should be the same")
|
||||
|
||||
// --- Test StopAll ---
|
||||
err = sm.StopAll()
|
||||
require.NoError(t, err, "StopAll should not return an error")
|
||||
|
||||
// Give the server a moment to shut down
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// --- Verify Server is Stopped ---
|
||||
_, err = client.Get(url)
|
||||
require.Error(t, err, "should not be able to make a request to a stopped server")
|
||||
|
||||
// --- Test Remove ---
|
||||
err = sm.Remove("TestServer")
|
||||
require.NoError(t, err, "should be able to remove a server")
|
||||
|
||||
_, err = sm.Get("TestServer")
|
||||
require.Error(t, err, "should not be able to get a removed server")
|
||||
}
|
||||
|
||||
func TestManagerErrorCases(t *testing.T) {
|
||||
logger.Init(true)
|
||||
sm := NewManager()
|
||||
testPort := getFreePort(t)
|
||||
|
||||
// --- Test Add Duplicate Name ---
|
||||
config1 := Config{Name: "Duplicate", Host: "localhost", Port: testPort, Handler: http.NewServeMux()}
|
||||
_, err := sm.Add(config1)
|
||||
require.NoError(t, err)
|
||||
|
||||
config2 := Config{Name: "Duplicate", Host: "localhost", Port: getFreePort(t), Handler: http.NewServeMux()}
|
||||
_, err = sm.Add(config2)
|
||||
require.Error(t, err, "should not be able to add a server with a duplicate name")
|
||||
|
||||
// --- Test Get Non-existent ---
|
||||
_, err = sm.Get("NonExistent")
|
||||
require.Error(t, err, "should get an error for a non-existent server")
|
||||
|
||||
// --- Test Add with Nil Handler ---
|
||||
config3 := Config{Name: "NilHandler", Host: "localhost", Port: getFreePort(t), Handler: nil}
|
||||
_, err = sm.Add(config3)
|
||||
require.Error(t, err, "should not be able to add a server with a nil handler")
|
||||
}
|
||||
|
||||
func TestGracefulShutdown(t *testing.T) {
|
||||
logger.Init(true)
|
||||
sm := NewManager()
|
||||
|
||||
requestsHandled := 0
|
||||
var requestsMu sync.Mutex
|
||||
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
requestsMu.Lock()
|
||||
requestsHandled++
|
||||
requestsMu.Unlock()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
testPort := getFreePort(t)
|
||||
instance, err := sm.Add(Config{
|
||||
Name: "TestServer",
|
||||
Host: "localhost",
|
||||
Port: testPort,
|
||||
Handler: handler,
|
||||
DrainTimeout: 2 * time.Second,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = sm.StartAll()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Give server time to start
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Send some concurrent requests
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < 5; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
client := &http.Client{Timeout: 5 * time.Second}
|
||||
url := fmt.Sprintf("http://localhost:%d", testPort)
|
||||
resp, err := client.Get(url)
|
||||
if err == nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Wait a bit for requests to start
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Check in-flight requests
|
||||
inFlight := instance.InFlightRequests()
|
||||
assert.Greater(t, inFlight, int64(0), "Should have in-flight requests")
|
||||
|
||||
// Stop the server
|
||||
err = sm.StopAll()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for all requests to complete
|
||||
wg.Wait()
|
||||
|
||||
// Verify all requests were handled
|
||||
requestsMu.Lock()
|
||||
handled := requestsHandled
|
||||
requestsMu.Unlock()
|
||||
assert.GreaterOrEqual(t, handled, 1, "At least some requests should have been handled")
|
||||
|
||||
// Verify no in-flight requests
|
||||
assert.Equal(t, int64(0), instance.InFlightRequests(), "Should have no in-flight requests after shutdown")
|
||||
}
|
||||
|
||||
func TestHealthAndReadinessEndpoints(t *testing.T) {
|
||||
logger.Init(true)
|
||||
sm := NewManager()
|
||||
|
||||
mux := http.NewServeMux()
|
||||
testPort := getFreePort(t)
|
||||
|
||||
instance, err := sm.Add(Config{
|
||||
Name: "TestServer",
|
||||
Host: "localhost",
|
||||
Port: testPort,
|
||||
Handler: mux,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Add health and readiness endpoints
|
||||
mux.HandleFunc("/health", instance.HealthCheckHandler())
|
||||
mux.HandleFunc("/ready", instance.ReadinessHandler())
|
||||
|
||||
err = sm.StartAll()
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
client := &http.Client{Timeout: 2 * time.Second}
|
||||
baseURL := fmt.Sprintf("http://localhost:%d", testPort)
|
||||
|
||||
// Test health endpoint
|
||||
resp, err := client.Get(baseURL + "/health")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, http.StatusOK, resp.StatusCode)
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
assert.Contains(t, string(body), "healthy")
|
||||
|
||||
// Test readiness endpoint
|
||||
resp, err = client.Get(baseURL + "/ready")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, http.StatusOK, resp.StatusCode)
|
||||
body, _ = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
assert.Contains(t, string(body), "ready")
|
||||
assert.Contains(t, string(body), "in_flight_requests")
|
||||
|
||||
// Stop the server
|
||||
sm.StopAll()
|
||||
}
|
||||
|
||||
func TestRequestRejectionDuringShutdown(t *testing.T) {
|
||||
logger.Init(true)
|
||||
sm := NewManager()
|
||||
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
testPort := getFreePort(t)
|
||||
_, err := sm.Add(Config{
|
||||
Name: "TestServer",
|
||||
Host: "localhost",
|
||||
Port: testPort,
|
||||
Handler: handler,
|
||||
DrainTimeout: 1 * time.Second,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = sm.StartAll()
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Start shutdown in background
|
||||
go func() {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
sm.StopAll()
|
||||
}()
|
||||
|
||||
// Give shutdown time to start
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Try to make a request after shutdown started
|
||||
client := &http.Client{Timeout: 2 * time.Second}
|
||||
url := fmt.Sprintf("http://localhost:%d", testPort)
|
||||
resp, err := client.Get(url)
|
||||
|
||||
// The request should either fail (connection refused) or get 503
|
||||
if err == nil {
|
||||
assert.Equal(t, http.StatusServiceUnavailable, resp.StatusCode, "Should get 503 during shutdown")
|
||||
resp.Body.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func TestShutdownCallbacks(t *testing.T) {
|
||||
logger.Init(true)
|
||||
sm := NewManager()
|
||||
|
||||
callbackExecuted := false
|
||||
var callbackMu sync.Mutex
|
||||
|
||||
sm.RegisterShutdownCallback(func(ctx context.Context) error {
|
||||
callbackMu.Lock()
|
||||
callbackExecuted = true
|
||||
callbackMu.Unlock()
|
||||
return nil
|
||||
})
|
||||
|
||||
testPort := getFreePort(t)
|
||||
_, err := sm.Add(Config{
|
||||
Name: "TestServer",
|
||||
Host: "localhost",
|
||||
Port: testPort,
|
||||
Handler: http.NewServeMux(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = sm.StartAll()
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
err = sm.StopAll()
|
||||
require.NoError(t, err)
|
||||
|
||||
callbackMu.Lock()
|
||||
executed := callbackExecuted
|
||||
callbackMu.Unlock()
|
||||
|
||||
assert.True(t, executed, "Shutdown callback should have been executed")
|
||||
}
|
||||
@@ -1,296 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
)
|
||||
|
||||
// GracefulServer wraps http.Server with graceful shutdown capabilities
|
||||
type GracefulServer struct {
|
||||
server *http.Server
|
||||
shutdownTimeout time.Duration
|
||||
drainTimeout time.Duration
|
||||
inFlightRequests atomic.Int64
|
||||
isShuttingDown atomic.Bool
|
||||
shutdownOnce sync.Once
|
||||
shutdownComplete chan struct{}
|
||||
}
|
||||
|
||||
// Config holds configuration for the graceful server
|
||||
type Config struct {
|
||||
// Addr is the server address (e.g., ":8080")
|
||||
Addr string
|
||||
|
||||
// Handler is the HTTP handler
|
||||
Handler http.Handler
|
||||
|
||||
// ShutdownTimeout is the maximum time to wait for graceful shutdown
|
||||
// Default: 30 seconds
|
||||
ShutdownTimeout time.Duration
|
||||
|
||||
// DrainTimeout is the time to wait for in-flight requests to complete
|
||||
// before forcing shutdown. Default: 25 seconds
|
||||
DrainTimeout time.Duration
|
||||
|
||||
// ReadTimeout is the maximum duration for reading the entire request
|
||||
ReadTimeout time.Duration
|
||||
|
||||
// WriteTimeout is the maximum duration before timing out writes of the response
|
||||
WriteTimeout time.Duration
|
||||
|
||||
// IdleTimeout is the maximum amount of time to wait for the next request
|
||||
IdleTimeout time.Duration
|
||||
}
|
||||
|
||||
// NewGracefulServer creates a new graceful server
|
||||
func NewGracefulServer(config Config) *GracefulServer {
|
||||
if config.ShutdownTimeout == 0 {
|
||||
config.ShutdownTimeout = 30 * time.Second
|
||||
}
|
||||
if config.DrainTimeout == 0 {
|
||||
config.DrainTimeout = 25 * time.Second
|
||||
}
|
||||
if config.ReadTimeout == 0 {
|
||||
config.ReadTimeout = 10 * time.Second
|
||||
}
|
||||
if config.WriteTimeout == 0 {
|
||||
config.WriteTimeout = 10 * time.Second
|
||||
}
|
||||
if config.IdleTimeout == 0 {
|
||||
config.IdleTimeout = 120 * time.Second
|
||||
}
|
||||
|
||||
gs := &GracefulServer{
|
||||
server: &http.Server{
|
||||
Addr: config.Addr,
|
||||
Handler: config.Handler,
|
||||
ReadTimeout: config.ReadTimeout,
|
||||
WriteTimeout: config.WriteTimeout,
|
||||
IdleTimeout: config.IdleTimeout,
|
||||
},
|
||||
shutdownTimeout: config.ShutdownTimeout,
|
||||
drainTimeout: config.DrainTimeout,
|
||||
shutdownComplete: make(chan struct{}),
|
||||
}
|
||||
|
||||
return gs
|
||||
}
|
||||
|
||||
// TrackRequestsMiddleware tracks in-flight requests and blocks new requests during shutdown
|
||||
func (gs *GracefulServer) TrackRequestsMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check if shutting down
|
||||
if gs.isShuttingDown.Load() {
|
||||
http.Error(w, `{"error":"service_unavailable","message":"Server is shutting down"}`, http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
// Increment in-flight counter
|
||||
gs.inFlightRequests.Add(1)
|
||||
defer gs.inFlightRequests.Add(-1)
|
||||
|
||||
// Serve the request
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// ListenAndServe starts the server and handles graceful shutdown
|
||||
func (gs *GracefulServer) ListenAndServe() error {
|
||||
// Wrap handler with request tracking
|
||||
gs.server.Handler = gs.TrackRequestsMiddleware(gs.server.Handler)
|
||||
|
||||
// Start server in goroutine
|
||||
serverErr := make(chan error, 1)
|
||||
go func() {
|
||||
logger.Info("Starting server on %s", gs.server.Addr)
|
||||
if err := gs.server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
serverErr <- err
|
||||
}
|
||||
close(serverErr)
|
||||
}()
|
||||
|
||||
// Wait for interrupt signal
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGINT)
|
||||
|
||||
select {
|
||||
case err := <-serverErr:
|
||||
return err
|
||||
case sig := <-sigChan:
|
||||
logger.Info("Received signal: %v, initiating graceful shutdown", sig)
|
||||
return gs.Shutdown(context.Background())
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown performs graceful shutdown with request draining
|
||||
func (gs *GracefulServer) Shutdown(ctx context.Context) error {
|
||||
var shutdownErr error
|
||||
|
||||
gs.shutdownOnce.Do(func() {
|
||||
logger.Info("Starting graceful shutdown...")
|
||||
|
||||
// Mark as shutting down (new requests will be rejected)
|
||||
gs.isShuttingDown.Store(true)
|
||||
|
||||
// Create context with timeout
|
||||
shutdownCtx, cancel := context.WithTimeout(ctx, gs.shutdownTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Wait for in-flight requests to complete (with drain timeout)
|
||||
drainCtx, drainCancel := context.WithTimeout(shutdownCtx, gs.drainTimeout)
|
||||
defer drainCancel()
|
||||
|
||||
shutdownErr = gs.drainRequests(drainCtx)
|
||||
if shutdownErr != nil {
|
||||
logger.Error("Error draining requests: %v", shutdownErr)
|
||||
}
|
||||
|
||||
// Shutdown the server
|
||||
logger.Info("Shutting down HTTP server...")
|
||||
if err := gs.server.Shutdown(shutdownCtx); err != nil {
|
||||
logger.Error("Error shutting down server: %v", err)
|
||||
if shutdownErr == nil {
|
||||
shutdownErr = err
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("Graceful shutdown complete")
|
||||
close(gs.shutdownComplete)
|
||||
})
|
||||
|
||||
return shutdownErr
|
||||
}
|
||||
|
||||
// drainRequests waits for in-flight requests to complete
|
||||
func (gs *GracefulServer) drainRequests(ctx context.Context) error {
|
||||
ticker := time.NewTicker(100 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
for {
|
||||
inFlight := gs.inFlightRequests.Load()
|
||||
|
||||
if inFlight == 0 {
|
||||
logger.Info("All requests drained in %v", time.Since(startTime))
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.Warn("Drain timeout exceeded with %d requests still in flight", inFlight)
|
||||
return fmt.Errorf("drain timeout exceeded: %d requests still in flight", inFlight)
|
||||
case <-ticker.C:
|
||||
logger.Debug("Waiting for %d in-flight requests to complete...", inFlight)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// InFlightRequests returns the current number of in-flight requests
|
||||
func (gs *GracefulServer) InFlightRequests() int64 {
|
||||
return gs.inFlightRequests.Load()
|
||||
}
|
||||
|
||||
// IsShuttingDown returns true if the server is shutting down
|
||||
func (gs *GracefulServer) IsShuttingDown() bool {
|
||||
return gs.isShuttingDown.Load()
|
||||
}
|
||||
|
||||
// Wait blocks until shutdown is complete
|
||||
func (gs *GracefulServer) Wait() {
|
||||
<-gs.shutdownComplete
|
||||
}
|
||||
|
||||
// HealthCheckHandler returns a handler that responds to health checks
|
||||
// Returns 200 OK when healthy, 503 Service Unavailable when shutting down
|
||||
func (gs *GracefulServer) HealthCheckHandler() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if gs.IsShuttingDown() {
|
||||
http.Error(w, `{"status":"shutting_down"}`, http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, err := w.Write([]byte(`{"status":"healthy"}`))
|
||||
if err != nil {
|
||||
logger.Warn("Failed to write. %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ReadinessHandler returns a handler for readiness checks
|
||||
// Includes in-flight request count
|
||||
func (gs *GracefulServer) ReadinessHandler() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if gs.IsShuttingDown() {
|
||||
http.Error(w, `{"ready":false,"reason":"shutting_down"}`, http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
inFlight := gs.InFlightRequests()
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintf(w, `{"ready":true,"in_flight_requests":%d}`, inFlight)
|
||||
}
|
||||
}
|
||||
|
||||
// ShutdownCallback is a function called during shutdown
|
||||
type ShutdownCallback func(context.Context) error
|
||||
|
||||
// shutdownCallbacks stores registered shutdown callbacks
|
||||
var (
|
||||
shutdownCallbacks []ShutdownCallback
|
||||
shutdownCallbacksMu sync.Mutex
|
||||
)
|
||||
|
||||
// RegisterShutdownCallback registers a callback to be called during shutdown
|
||||
// Useful for cleanup tasks like closing database connections, flushing metrics, etc.
|
||||
func RegisterShutdownCallback(cb ShutdownCallback) {
|
||||
shutdownCallbacksMu.Lock()
|
||||
defer shutdownCallbacksMu.Unlock()
|
||||
shutdownCallbacks = append(shutdownCallbacks, cb)
|
||||
}
|
||||
|
||||
// executeShutdownCallbacks runs all registered shutdown callbacks
|
||||
func executeShutdownCallbacks(ctx context.Context) error {
|
||||
shutdownCallbacksMu.Lock()
|
||||
callbacks := make([]ShutdownCallback, len(shutdownCallbacks))
|
||||
copy(callbacks, shutdownCallbacks)
|
||||
shutdownCallbacksMu.Unlock()
|
||||
|
||||
var errors []error
|
||||
for i, cb := range callbacks {
|
||||
logger.Debug("Executing shutdown callback %d/%d", i+1, len(callbacks))
|
||||
if err := cb(ctx); err != nil {
|
||||
logger.Error("Shutdown callback %d failed: %v", i+1, err)
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return fmt.Errorf("shutdown callbacks failed: %v", errors)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ShutdownWithCallbacks performs shutdown and executes all registered callbacks
|
||||
func (gs *GracefulServer) ShutdownWithCallbacks(ctx context.Context) error {
|
||||
// Execute callbacks first
|
||||
if err := executeShutdownCallbacks(ctx); err != nil {
|
||||
logger.Error("Error executing shutdown callbacks: %v", err)
|
||||
}
|
||||
|
||||
// Then shutdown the server
|
||||
return gs.Shutdown(ctx)
|
||||
}
|
||||
@@ -1,231 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestGracefulServerTrackRequests(t *testing.T) {
|
||||
srv := NewGracefulServer(Config{
|
||||
Addr: ":0",
|
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}),
|
||||
})
|
||||
|
||||
handler := srv.TrackRequestsMiddleware(srv.server.Handler)
|
||||
|
||||
// Start some requests
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < 5; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
w := httptest.NewRecorder()
|
||||
handler.ServeHTTP(w, req)
|
||||
}()
|
||||
}
|
||||
|
||||
// Wait a bit for requests to start
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Check in-flight count
|
||||
inFlight := srv.InFlightRequests()
|
||||
if inFlight == 0 {
|
||||
t.Error("Should have in-flight requests")
|
||||
}
|
||||
|
||||
// Wait for all requests to complete
|
||||
wg.Wait()
|
||||
|
||||
// Check that counter is back to zero
|
||||
inFlight = srv.InFlightRequests()
|
||||
if inFlight != 0 {
|
||||
t.Errorf("In-flight requests should be 0, got %d", inFlight)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGracefulServerRejectsRequestsDuringShutdown(t *testing.T) {
|
||||
srv := NewGracefulServer(Config{
|
||||
Addr: ":0",
|
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}),
|
||||
})
|
||||
|
||||
handler := srv.TrackRequestsMiddleware(srv.server.Handler)
|
||||
|
||||
// Mark as shutting down
|
||||
srv.isShuttingDown.Store(true)
|
||||
|
||||
// Try to make a request
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
w := httptest.NewRecorder()
|
||||
handler.ServeHTTP(w, req)
|
||||
|
||||
// Should get 503
|
||||
if w.Code != http.StatusServiceUnavailable {
|
||||
t.Errorf("Expected 503, got %d", w.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHealthCheckHandler(t *testing.T) {
|
||||
srv := NewGracefulServer(Config{
|
||||
Addr: ":0",
|
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),
|
||||
})
|
||||
|
||||
handler := srv.HealthCheckHandler()
|
||||
|
||||
// Healthy
|
||||
t.Run("Healthy", func(t *testing.T) {
|
||||
req := httptest.NewRequest("GET", "/health", nil)
|
||||
w := httptest.NewRecorder()
|
||||
handler.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Errorf("Expected 200, got %d", w.Code)
|
||||
}
|
||||
|
||||
if w.Body.String() != `{"status":"healthy"}` {
|
||||
t.Errorf("Unexpected body: %s", w.Body.String())
|
||||
}
|
||||
})
|
||||
|
||||
// Shutting down
|
||||
t.Run("ShuttingDown", func(t *testing.T) {
|
||||
srv.isShuttingDown.Store(true)
|
||||
|
||||
req := httptest.NewRequest("GET", "/health", nil)
|
||||
w := httptest.NewRecorder()
|
||||
handler.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusServiceUnavailable {
|
||||
t.Errorf("Expected 503, got %d", w.Code)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestReadinessHandler(t *testing.T) {
|
||||
srv := NewGracefulServer(Config{
|
||||
Addr: ":0",
|
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),
|
||||
})
|
||||
|
||||
handler := srv.ReadinessHandler()
|
||||
|
||||
// Ready with no in-flight requests
|
||||
t.Run("Ready", func(t *testing.T) {
|
||||
req := httptest.NewRequest("GET", "/ready", nil)
|
||||
w := httptest.NewRecorder()
|
||||
handler.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Errorf("Expected 200, got %d", w.Code)
|
||||
}
|
||||
|
||||
body := w.Body.String()
|
||||
if body != `{"ready":true,"in_flight_requests":0}` {
|
||||
t.Errorf("Unexpected body: %s", body)
|
||||
}
|
||||
})
|
||||
|
||||
// Not ready during shutdown
|
||||
t.Run("NotReady", func(t *testing.T) {
|
||||
srv.isShuttingDown.Store(true)
|
||||
|
||||
req := httptest.NewRequest("GET", "/ready", nil)
|
||||
w := httptest.NewRecorder()
|
||||
handler.ServeHTTP(w, req)
|
||||
|
||||
if w.Code != http.StatusServiceUnavailable {
|
||||
t.Errorf("Expected 503, got %d", w.Code)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestShutdownCallbacks(t *testing.T) {
|
||||
callbackExecuted := false
|
||||
|
||||
RegisterShutdownCallback(func(ctx context.Context) error {
|
||||
callbackExecuted = true
|
||||
return nil
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
err := executeShutdownCallbacks(ctx)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("executeShutdownCallbacks() error = %v", err)
|
||||
}
|
||||
|
||||
if !callbackExecuted {
|
||||
t.Error("Shutdown callback was not executed")
|
||||
}
|
||||
|
||||
// Reset for other tests
|
||||
shutdownCallbacks = nil
|
||||
}
|
||||
|
||||
func TestDrainRequests(t *testing.T) {
|
||||
srv := NewGracefulServer(Config{
|
||||
Addr: ":0",
|
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),
|
||||
DrainTimeout: 1 * time.Second,
|
||||
})
|
||||
|
||||
// Simulate in-flight requests
|
||||
srv.inFlightRequests.Add(3)
|
||||
|
||||
// Start draining in background
|
||||
go func() {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
// Simulate requests completing
|
||||
srv.inFlightRequests.Add(-3)
|
||||
}()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
err := srv.drainRequests(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("drainRequests() error = %v", err)
|
||||
}
|
||||
|
||||
if srv.InFlightRequests() != 0 {
|
||||
t.Errorf("In-flight requests should be 0, got %d", srv.InFlightRequests())
|
||||
}
|
||||
}
|
||||
|
||||
func TestDrainRequestsTimeout(t *testing.T) {
|
||||
srv := NewGracefulServer(Config{
|
||||
Addr: ":0",
|
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),
|
||||
DrainTimeout: 100 * time.Millisecond,
|
||||
})
|
||||
|
||||
// Simulate in-flight requests that don't complete
|
||||
srv.inFlightRequests.Add(5)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
err := srv.drainRequests(ctx)
|
||||
if err == nil {
|
||||
t.Error("drainRequests() should timeout with error")
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
srv.inFlightRequests.Add(-5)
|
||||
}
|
||||
|
||||
func TestGetClientIP(t *testing.T) {
|
||||
// This test is in ratelimit_test.go since getClientIP is used by rate limiter
|
||||
// Including here for completeness of server tests
|
||||
}
|
||||
190
pkg/server/tls.go
Normal file
190
pkg/server/tls.go
Normal file
@@ -0,0 +1,190 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
)
|
||||
|
||||
// generateSelfSignedCert generates a self-signed certificate for the given host.
|
||||
// Returns the certificate and private key in PEM format.
|
||||
func generateSelfSignedCert(host string) (certPEM, keyPEM []byte, err error) {
|
||||
// Generate private key
|
||||
priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to generate private key: %w", err)
|
||||
}
|
||||
|
||||
// Create certificate template
|
||||
notBefore := time.Now()
|
||||
notAfter := notBefore.Add(365 * 24 * time.Hour) // Valid for 1 year
|
||||
|
||||
serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to generate serial number: %w", err)
|
||||
}
|
||||
|
||||
template := x509.Certificate{
|
||||
SerialNumber: serialNumber,
|
||||
Subject: pkix.Name{
|
||||
Organization: []string{"ResolveSpec Self-Signed"},
|
||||
CommonName: host,
|
||||
},
|
||||
NotBefore: notBefore,
|
||||
NotAfter: notAfter,
|
||||
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
|
||||
// Add host as DNS name or IP address
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
template.IPAddresses = []net.IP{ip}
|
||||
} else {
|
||||
template.DNSNames = []string{host}
|
||||
}
|
||||
|
||||
// Create certificate
|
||||
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create certificate: %w", err)
|
||||
}
|
||||
|
||||
// Encode certificate to PEM
|
||||
certPEM = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
||||
|
||||
// Encode private key to PEM
|
||||
privBytes, err := x509.MarshalECPrivateKey(priv)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to marshal private key: %w", err)
|
||||
}
|
||||
keyPEM = pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: privBytes})
|
||||
|
||||
return certPEM, keyPEM, nil
|
||||
}
|
||||
|
||||
// saveCertToTempFiles saves certificate and key PEM data to temporary files.
|
||||
// Returns the file paths for the certificate and key.
|
||||
func saveCertToTempFiles(certPEM, keyPEM []byte) (certFile, keyFile string, err error) {
|
||||
// Create temporary directory
|
||||
tmpDir, err := os.MkdirTemp("", "resolvespec-certs-*")
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to create temp directory: %w", err)
|
||||
}
|
||||
|
||||
certFile = filepath.Join(tmpDir, "cert.pem")
|
||||
keyFile = filepath.Join(tmpDir, "key.pem")
|
||||
|
||||
// Write certificate
|
||||
if err := os.WriteFile(certFile, certPEM, 0600); err != nil {
|
||||
os.RemoveAll(tmpDir)
|
||||
return "", "", fmt.Errorf("failed to write certificate: %w", err)
|
||||
}
|
||||
|
||||
// Write key
|
||||
if err := os.WriteFile(keyFile, keyPEM, 0600); err != nil {
|
||||
os.RemoveAll(tmpDir)
|
||||
return "", "", fmt.Errorf("failed to write private key: %w", err)
|
||||
}
|
||||
|
||||
return certFile, keyFile, nil
|
||||
}
|
||||
|
||||
// setupAutoTLS configures automatic TLS certificate management using Let's Encrypt.
|
||||
// Returns a TLS config that can be used with http.Server.
|
||||
func setupAutoTLS(domains []string, email, cacheDir string) (*tls.Config, error) {
|
||||
if len(domains) == 0 {
|
||||
return nil, fmt.Errorf("at least one domain must be specified for AutoTLS")
|
||||
}
|
||||
|
||||
// Set default cache directory
|
||||
if cacheDir == "" {
|
||||
cacheDir = "./certs-cache"
|
||||
}
|
||||
|
||||
// Create cache directory if it doesn't exist
|
||||
if err := os.MkdirAll(cacheDir, 0700); err != nil {
|
||||
return nil, fmt.Errorf("failed to create certificate cache directory: %w", err)
|
||||
}
|
||||
|
||||
// Create autocert manager
|
||||
m := &autocert.Manager{
|
||||
Prompt: autocert.AcceptTOS,
|
||||
Cache: autocert.DirCache(cacheDir),
|
||||
HostPolicy: autocert.HostWhitelist(domains...),
|
||||
Email: email,
|
||||
}
|
||||
|
||||
// Create TLS config
|
||||
tlsConfig := m.TLSConfig()
|
||||
tlsConfig.MinVersion = tls.VersionTLS12
|
||||
|
||||
return tlsConfig, nil
|
||||
}
|
||||
|
||||
// configureTLS configures TLS for the server based on the provided configuration.
// Returns the TLS config and certificate/key file paths (if applicable).
func configureTLS(cfg Config) (*tls.Config, string, string, error) {
	// Option 1: Certificate files provided
	if cfg.SSLCert != "" && cfg.SSLKey != "" {
		// Validate that files exist
		if _, err := os.Stat(cfg.SSLCert); os.IsNotExist(err) {
			return nil, "", "", fmt.Errorf("SSL certificate file not found: %s", cfg.SSLCert)
		}
		if _, err := os.Stat(cfg.SSLKey); os.IsNotExist(err) {
			return nil, "", "", fmt.Errorf("SSL key file not found: %s", cfg.SSLKey)
		}

		// Return basic TLS config - cert/key will be loaded by ListenAndServeTLS
		tlsConfig := &tls.Config{
			MinVersion: tls.VersionTLS12,
		}
		return tlsConfig, cfg.SSLCert, cfg.SSLKey, nil
	}

	// Option 2: Auto TLS (Let's Encrypt)
	if cfg.AutoTLS {
		tlsConfig, err := setupAutoTLS(cfg.AutoTLSDomains, cfg.AutoTLSEmail, cfg.AutoTLSCacheDir)
		if err != nil {
			return nil, "", "", fmt.Errorf("failed to setup AutoTLS: %w", err)
		}
		return tlsConfig, "", "", nil
	}

	// Option 3: Self-signed certificate
	if cfg.SelfSignedSSL {
		host := cfg.Host
		if host == "" || host == "0.0.0.0" {
			host = "localhost"
		}

		certPEM, keyPEM, err := generateSelfSignedCert(host)
		if err != nil {
			return nil, "", "", fmt.Errorf("failed to generate self-signed certificate: %w", err)
		}

		certFile, keyFile, err := saveCertToTempFiles(certPEM, keyPEM)
		if err != nil {
			return nil, "", "", fmt.Errorf("failed to save self-signed certificate: %w", err)
		}

		tlsConfig := &tls.Config{
			MinVersion: tls.VersionTLS12,
		}
		return tlsConfig, certFile, keyFile, nil
	}

	return nil, "", "", nil
}

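// serveWithConfiguredTLS is an illustrative sketch (not part of this change)
// showing how the result of configureTLS can be wired into net/http: a nil
// TLS config means plain HTTP; otherwise certFile/keyFile are passed to
// ListenAndServeTLS. For the AutoTLS case both paths are empty and the
// certificate is supplied by tlsConfig.GetCertificate (set by autocert).
// The addr and handler parameters are placeholders, and "net/http" is assumed
// to be imported alongside the existing imports.
func serveWithConfiguredTLS(addr string, cfg Config, handler http.Handler) error {
	tlsConfig, certFile, keyFile, err := configureTLS(cfg)
	if err != nil {
		return err
	}

	srv := &http.Server{
		Addr:      addr,
		Handler:   handler,
		TLSConfig: tlsConfig,
	}

	if tlsConfig == nil {
		// No TLS option configured: serve plain HTTP.
		return srv.ListenAndServe()
	}
	return srv.ListenAndServeTLS(certFile, keyFile)
}
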
@@ -465,7 +465,7 @@ func processRequest(ctx context.Context) {

1. **Check collector is running:**
   ```bash
-  docker-compose ps
+  podman compose ps
   ```

2. **Verify endpoint:**
@@ -476,7 +476,7 @@ func processRequest(ctx context.Context) {

3. **Check logs:**
   ```bash
-  docker-compose logs otel-collector
+  podman compose logs otel-collector
   ```

### Disable Tracing

@@ -14,33 +14,33 @@ NC='\033[0m' # No Color

echo -e "${GREEN}=== ResolveSpec Integration Tests ===${NC}\n"

-# Check if docker-compose is available
-if ! command -v docker-compose &> /dev/null; then
-    echo -e "${RED}Error: docker-compose is not installed${NC}"
-    echo "Please install docker-compose or run PostgreSQL manually"
+# Check if podman compose is available
+if ! command -v podman &> /dev/null; then
+    echo -e "${RED}Error: podman is not installed${NC}"
+    echo "Please install podman or run PostgreSQL manually"
    echo "See INTEGRATION_TESTS.md for details"
    exit 1
fi

# Clean up any existing containers and networks from previous runs
echo -e "${YELLOW}Cleaning up existing containers and networks...${NC}"
-docker-compose down -v 2>/dev/null || true
+podman compose down -v 2>/dev/null || true

# Start PostgreSQL
echo -e "${YELLOW}Starting PostgreSQL...${NC}"
-docker-compose up -d postgres-test
+podman compose up -d postgres-test

# Wait for PostgreSQL to be ready
echo -e "${YELLOW}Waiting for PostgreSQL to be ready...${NC}"
max_attempts=30
attempt=0

-while ! docker-compose exec -T postgres-test pg_isready -U postgres > /dev/null 2>&1; do
+while ! podman compose exec -T postgres-test pg_isready -U postgres > /dev/null 2>&1; do
    attempt=$((attempt + 1))
    if [ $attempt -ge $max_attempts ]; then
        echo -e "${RED}Error: PostgreSQL failed to start after ${max_attempts} seconds${NC}"
-        docker-compose logs postgres-test
-        docker-compose down
+        podman compose logs postgres-test
+        podman compose down
        exit 1
    fi
    sleep 1
@@ -51,8 +51,8 @@ echo -e "\n${GREEN}PostgreSQL is ready!${NC}\n"

# Create test databases
echo -e "${YELLOW}Creating test databases...${NC}"
-docker-compose exec -T postgres-test psql -U postgres -c "CREATE DATABASE resolvespec_test;" 2>/dev/null || echo " resolvespec_test already exists"
-docker-compose exec -T postgres-test psql -U postgres -c "CREATE DATABASE restheadspec_test;" 2>/dev/null || echo " restheadspec_test already exists"
+podman compose exec -T postgres-test psql -U postgres -c "CREATE DATABASE resolvespec_test;" 2>/dev/null || echo " resolvespec_test already exists"
+podman compose exec -T postgres-test psql -U postgres -c "CREATE DATABASE restheadspec_test;" 2>/dev/null || echo " restheadspec_test already exists"
echo -e "${GREEN}Test databases ready!${NC}\n"

# Determine which tests to run
@@ -79,6 +79,6 @@ fi

# Cleanup
echo -e "\n${YELLOW}Stopping PostgreSQL...${NC}"
-docker-compose down
+podman compose down

exit $EXIT_CODE

@@ -19,14 +19,14 @@ Integration tests validate the full functionality of both `pkg/resolvespec` and

- Go 1.19 or later
- PostgreSQL 12 or later
-- Docker and Docker Compose (optional, for easy setup)
+- Podman and Podman Compose (optional, for easy setup)

-## Quick Start with Docker
+## Quick Start with Podman

-### 1. Start PostgreSQL with Docker Compose
+### 1. Start PostgreSQL with Podman Compose

```bash
-docker-compose up -d postgres-test
+podman compose up -d postgres-test
```

This starts a PostgreSQL container with the following default settings:
@@ -52,7 +52,7 @@ go test -tags=integration ./pkg/restheadspec -v

### 3. Stop PostgreSQL

```bash
-docker-compose down
+podman compose down
```

## Manual PostgreSQL Setup
@@ -161,7 +161,7 @@ If you see "connection refused" errors:

1. Check that PostgreSQL is running:
   ```bash
-  docker-compose ps
+  podman compose ps
   ```

2. Verify connection parameters:
@@ -194,10 +194,10 @@ Each test automatically cleans up its data using `TRUNCATE`. If you need a fresh

```bash
# Stop and remove containers (removes data)
-docker-compose down -v
+podman compose down -v

# Restart
-docker-compose up -d postgres-test
+podman compose up -d postgres-test
```

## CI/CD Integration

@@ -119,13 +119,13 @@ Integration tests require a PostgreSQL database and use the `// +build integrati

- PostgreSQL 12+ installed and running
- Create test databases manually (see below)

-### Setup with Docker
+### Setup with Podman

1. **Start PostgreSQL**:
   ```bash
   make docker-up
   # or
-  docker-compose up -d postgres-test
+  podman compose up -d postgres-test
   ```

2. **Run Tests**:
@@ -141,10 +141,10 @@ Integration tests require a PostgreSQL database and use the `// +build integrati

   ```bash
   make docker-down
   # or
-  docker-compose down
+  podman compose down
   ```

-### Setup without Docker
+### Setup without Podman

1. **Create Databases**:
   ```sql
@@ -289,8 +289,8 @@ go test -tags=integration ./pkg/resolvespec -v

**Problem**: "connection refused" or "database does not exist"

**Solutions**:
-1. Check PostgreSQL is running: `docker-compose ps`
-2. Verify databases exist: `docker-compose exec postgres-test psql -U postgres -l`
+1. Check PostgreSQL is running: `podman compose ps`
+2. Verify databases exist: `podman compose exec postgres-test psql -U postgres -l`
3. Check environment variable: `echo $TEST_DATABASE_URL`
4. Recreate databases: `make clean && make docker-up`