mirror of
https://github.com/bitechdev/ResolveSpec.git
synced 2026-01-02 18:04:25 +00:00
Compare commits
39 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ebd03d10ad | ||
|
|
4ee6ef0955 | ||
|
|
6f05f15ff6 | ||
|
|
443a672fcb | ||
|
|
c2fcc5aaff | ||
|
|
6664a4e2d2 | ||
| 037bd4c05e | |||
|
|
e77468a239 | ||
|
|
82d84435f2 | ||
|
|
b99b08430e | ||
|
|
fae9a082bd | ||
|
|
191822b91c | ||
|
|
a6a17d019f | ||
|
|
a7cc42044b | ||
|
|
8cdc353029 | ||
|
|
6528e94297 | ||
|
|
f711bf38d2 | ||
|
|
44356d8750 | ||
|
|
caf85cf558 | ||
|
|
2e1547ec65 | ||
|
|
49cdc6f17b | ||
|
|
0bd653820c | ||
|
|
9209193157 | ||
|
|
b8c44c5a99 | ||
|
|
28fd88fff1 | ||
|
|
be38341383 | ||
|
|
fab744b878 | ||
|
|
5ad2bd3a78 | ||
|
|
333fe158e9 | ||
|
|
2a2d351ad4 | ||
|
|
e918c49b84 | ||
|
|
41e4956510 | ||
|
|
8e8c3c6de6 | ||
|
|
aa9b7312f6 | ||
|
|
dca43b0e05 | ||
|
|
6f368bbce5 | ||
|
|
8704cee941 | ||
|
|
4ce5afe0ac | ||
|
|
7b98ea2145 |
9
.github/workflows/tests.yml
vendored
9
.github/workflows/tests.yml
vendored
@@ -17,11 +17,13 @@ jobs:
|
||||
- name: Run unit tests
|
||||
run: go test ./pkg/resolvespec ./pkg/restheadspec -v -cover
|
||||
- name: Generate coverage report
|
||||
continue-on-error: true
|
||||
run: |
|
||||
go test ./pkg/resolvespec ./pkg/restheadspec -coverprofile=coverage.out
|
||||
go tool cover -html=coverage.out -o coverage.html
|
||||
- name: Upload coverage
|
||||
uses: actions/upload-artifact@v5
|
||||
continue-on-error: true
|
||||
with:
|
||||
name: coverage-report
|
||||
path: coverage.html
|
||||
@@ -55,27 +57,34 @@ jobs:
|
||||
psql -h localhost -U postgres -c "CREATE DATABASE resolvespec_test;"
|
||||
psql -h localhost -U postgres -c "CREATE DATABASE restheadspec_test;"
|
||||
- name: Run resolvespec integration tests
|
||||
continue-on-error: true
|
||||
env:
|
||||
TEST_DATABASE_URL: "host=localhost user=postgres password=postgres dbname=resolvespec_test port=5432 sslmode=disable"
|
||||
run: go test -tags=integration ./pkg/resolvespec -v -coverprofile=coverage-resolvespec-integration.out
|
||||
- name: Run restheadspec integration tests
|
||||
continue-on-error: true
|
||||
env:
|
||||
TEST_DATABASE_URL: "host=localhost user=postgres password=postgres dbname=restheadspec_test port=5432 sslmode=disable"
|
||||
run: go test -tags=integration ./pkg/restheadspec -v -coverprofile=coverage-restheadspec-integration.out
|
||||
- name: Generate integration coverage
|
||||
continue-on-error: true
|
||||
env:
|
||||
TEST_DATABASE_URL: "host=localhost user=postgres password=postgres dbname=resolvespec_test port=5432 sslmode=disable"
|
||||
run: |
|
||||
go tool cover -html=coverage-resolvespec-integration.out -o coverage-resolvespec-integration.html
|
||||
go tool cover -html=coverage-restheadspec-integration.out -o coverage-restheadspec-integration.html
|
||||
|
||||
- name: Upload resolvespec integration coverage
|
||||
uses: actions/upload-artifact@v5
|
||||
continue-on-error: true
|
||||
with:
|
||||
name: resolvespec-integration-coverage-report
|
||||
path: coverage-resolvespec-integration.html
|
||||
|
||||
- name: Upload restheadspec integration coverage
|
||||
uses: actions/upload-artifact@v5
|
||||
continue-on-error: true
|
||||
|
||||
with:
|
||||
name: integration-coverage-restheadspec-report
|
||||
path: coverage-restheadspec-integration
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -25,4 +25,4 @@ go.work.sum
|
||||
.env
|
||||
bin/
|
||||
test.db
|
||||
testserver
|
||||
/testserver
|
||||
|
||||
6
.vscode/settings.json
vendored
6
.vscode/settings.json
vendored
@@ -52,5 +52,9 @@
|
||||
"upgrade_dependency": true,
|
||||
"vendor": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"conventionalCommits.scopes": [
|
||||
"spectypes",
|
||||
"dbmanager"
|
||||
]
|
||||
}
|
||||
86
LICENSE
86
LICENSE
@@ -1,21 +1,73 @@
|
||||
MIT License
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
Copyright (c) 2025
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
1. Definitions.
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
|
||||
|
||||
Copyright 2025 wdevs
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
22
Makefile
22
Makefile
@@ -13,14 +13,22 @@ test-integration:
|
||||
# Run all tests (unit + integration)
|
||||
test: test-unit test-integration
|
||||
|
||||
release-version: ## Create and push a release with specific version (use: make release-version VERSION=v1.2.3)
|
||||
release-version: ## Create and push a release with specific version (use: make release-version VERSION=v1.2.3 or make release-version to auto-increment)
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "Error: VERSION is required. Usage: make release-version VERSION=v1.2.3"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@version="$(VERSION)"; \
|
||||
if ! echo "$$version" | grep -q "^v"; then \
|
||||
version="v$$version"; \
|
||||
latest_tag=$$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0"); \
|
||||
echo "No VERSION specified. Last version: $$latest_tag"; \
|
||||
version_num=$$(echo "$$latest_tag" | sed 's/^v//'); \
|
||||
major=$$(echo "$$version_num" | cut -d. -f1); \
|
||||
minor=$$(echo "$$version_num" | cut -d. -f2); \
|
||||
patch=$$(echo "$$version_num" | cut -d. -f3); \
|
||||
new_patch=$$((patch + 1)); \
|
||||
version="v$$major.$$minor.$$new_patch"; \
|
||||
echo "Auto-incrementing to: $$version"; \
|
||||
else \
|
||||
version="$(VERSION)"; \
|
||||
if ! echo "$$version" | grep -q "^v"; then \
|
||||
version="v$$version"; \
|
||||
fi; \
|
||||
fi; \
|
||||
echo "Creating release: $$version"; \
|
||||
latest_tag=$$(git describe --tags --abbrev=0 2>/dev/null || echo ""); \
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/config"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/dbmanager"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/modelregistry"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/server"
|
||||
@@ -15,7 +17,6 @@ import (
|
||||
"github.com/bitechdev/ResolveSpec/pkg/resolvespec"
|
||||
"github.com/gorilla/mux"
|
||||
|
||||
"github.com/glebarez/sqlite"
|
||||
"gorm.io/gorm"
|
||||
gormlog "gorm.io/gorm/logger"
|
||||
)
|
||||
@@ -40,12 +41,14 @@ func main() {
|
||||
logger.Info("ResolveSpec test server starting")
|
||||
logger.Info("Configuration loaded - Server will listen on: %s", cfg.Server.Addr)
|
||||
|
||||
// Initialize database
|
||||
db, err := initDB(cfg)
|
||||
// Initialize database manager
|
||||
ctx := context.Background()
|
||||
dbMgr, db, err := initDB(ctx, cfg)
|
||||
if err != nil {
|
||||
logger.Error("Failed to initialize database: %+v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer dbMgr.Close()
|
||||
|
||||
// Create router
|
||||
r := mux.NewRouter()
|
||||
@@ -117,7 +120,7 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
func initDB(cfg *config.Config) (*gorm.DB, error) {
|
||||
func initDB(ctx context.Context, cfg *config.Config) (dbmanager.Manager, *gorm.DB, error) {
|
||||
// Configure GORM logger based on config
|
||||
logLevel := gormlog.Info
|
||||
if !cfg.Logger.Dev {
|
||||
@@ -135,25 +138,41 @@ func initDB(cfg *config.Config) (*gorm.DB, error) {
|
||||
},
|
||||
)
|
||||
|
||||
// Use database URL from config if available, otherwise use default SQLite
|
||||
dbURL := cfg.Database.URL
|
||||
if dbURL == "" {
|
||||
dbURL = "test.db"
|
||||
// Create database manager from config
|
||||
mgr, err := dbmanager.NewManager(dbmanager.FromConfig(cfg.DBManager))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create database manager: %w", err)
|
||||
}
|
||||
|
||||
// Create SQLite database
|
||||
db, err := gorm.Open(sqlite.Open(dbURL), &gorm.Config{Logger: newLogger, FullSaveAssociations: false})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// Connect all databases
|
||||
if err := mgr.Connect(ctx); err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to connect databases: %w", err)
|
||||
}
|
||||
|
||||
// Get default connection
|
||||
conn, err := mgr.GetDefault()
|
||||
if err != nil {
|
||||
mgr.Close()
|
||||
return nil, nil, fmt.Errorf("failed to get default connection: %w", err)
|
||||
}
|
||||
|
||||
// Get GORM database
|
||||
gormDB, err := conn.GORM()
|
||||
if err != nil {
|
||||
mgr.Close()
|
||||
return nil, nil, fmt.Errorf("failed to get GORM database: %w", err)
|
||||
}
|
||||
|
||||
// Update GORM logger
|
||||
gormDB.Logger = newLogger
|
||||
|
||||
modelList := testmodels.GetTestModels()
|
||||
|
||||
// Auto migrate schemas
|
||||
err = db.AutoMigrate(modelList...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if err := gormDB.AutoMigrate(modelList...); err != nil {
|
||||
mgr.Close()
|
||||
return nil, nil, fmt.Errorf("failed to auto migrate: %w", err)
|
||||
}
|
||||
|
||||
return db, nil
|
||||
return mgr, gormDB, nil
|
||||
}
|
||||
|
||||
24
config.yaml
24
config.yaml
@@ -37,5 +37,25 @@ cors:
|
||||
tracing:
|
||||
enabled: false
|
||||
|
||||
database:
|
||||
url: "" # Empty means use default SQLite (test.db)
|
||||
# Database Manager Configuration
|
||||
dbmanager:
|
||||
default_connection: "primary"
|
||||
max_open_conns: 25
|
||||
max_idle_conns: 5
|
||||
conn_max_lifetime: 30m
|
||||
conn_max_idle_time: 5m
|
||||
retry_attempts: 3
|
||||
retry_delay: 1s
|
||||
health_check_interval: 30s
|
||||
enable_auto_reconnect: true
|
||||
|
||||
connections:
|
||||
primary:
|
||||
name: "primary"
|
||||
type: "sqlite"
|
||||
filepath: "test.db"
|
||||
default_orm: "gorm"
|
||||
enable_logging: true
|
||||
enable_metrics: false
|
||||
connect_timeout: 10s
|
||||
query_timeout: 30s
|
||||
|
||||
17
go.mod
17
go.mod
@@ -15,6 +15,8 @@ require (
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/jackc/pgx/v5 v5.6.0
|
||||
github.com/klauspost/compress v1.18.0
|
||||
github.com/mattn/go-sqlite3 v1.14.32
|
||||
github.com/microsoft/go-mssqldb v1.9.5
|
||||
github.com/mochi-mqtt/server/v2 v2.7.9
|
||||
github.com/nats-io/nats.go v1.48.0
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
@@ -25,9 +27,12 @@ require (
|
||||
github.com/tidwall/gjson v1.18.0
|
||||
github.com/tidwall/sjson v1.2.5
|
||||
github.com/uptrace/bun v1.2.16
|
||||
github.com/uptrace/bun/dialect/mssqldialect v1.2.16
|
||||
github.com/uptrace/bun/dialect/pgdialect v1.2.16
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.2.16
|
||||
github.com/uptrace/bun/driver/sqliteshim v1.2.16
|
||||
github.com/uptrace/bunrouter v1.0.23
|
||||
go.mongodb.org/mongo-driver v1.17.6
|
||||
go.opentelemetry.io/otel v1.38.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0
|
||||
@@ -38,6 +43,7 @@ require (
|
||||
golang.org/x/time v0.14.0
|
||||
gorm.io/driver/postgres v1.6.0
|
||||
gorm.io/driver/sqlite v1.6.0
|
||||
gorm.io/driver/sqlserver v1.6.3
|
||||
gorm.io/gorm v1.30.0
|
||||
)
|
||||
|
||||
@@ -69,6 +75,9 @@ require (
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect
|
||||
github.com/golang-sql/sqlexp v0.1.0 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
@@ -78,7 +87,6 @@ require (
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/magiconair/properties v1.8.10 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.32 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/go-archive v0.1.0 // indirect
|
||||
github.com/moby/patternmatcher v0.6.0 // indirect
|
||||
@@ -86,6 +94,7 @@ require (
|
||||
github.com/moby/sys/user v0.4.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/montanaflynn/stats v0.7.1 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/nats-io/nkeys v0.4.11 // indirect
|
||||
@@ -105,6 +114,7 @@ require (
|
||||
github.com/rs/xid v1.4.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.11.0 // indirect
|
||||
github.com/shirou/gopsutil/v4 v4.25.6 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
|
||||
github.com/spf13/afero v1.15.0 // indirect
|
||||
@@ -119,6 +129,10 @@ require (
|
||||
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
|
||||
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
|
||||
github.com/xdg-go/scram v1.1.2 // indirect
|
||||
github.com/xdg-go/stringprep v1.0.4 // indirect
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
|
||||
@@ -128,6 +142,7 @@ require (
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 // indirect
|
||||
golang.org/x/mod v0.30.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
|
||||
190
go.sum
190
go.sum
@@ -2,8 +2,32 @@ dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
|
||||
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1/go.mod h1:uE9zaUfEQT/nbQjVi2IblCG9iaLtZsuYZ8ne+PuQ02M=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1 h1:Wgf5rZba3YZqeTNJPtvqZoBu1sBN/L4sry+u2U3Y75w=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1/go.mod h1:xxCBG/f/4Vbmh2XQJBsOmNdxWUY5j/s27jujKPbQf14=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1 h1:bFWuoEKg+gImo7pvkiQEFAc8ocibADgXeiLAxWhWmkI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1/go.mod h1:Vih/3yc6yac2JzU4hzpaDupBJP0Flaia9rXXrU8xyww=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
@@ -32,6 +56,7 @@ github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpS
|
||||
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
|
||||
github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
|
||||
github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -41,6 +66,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
|
||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
||||
github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM=
|
||||
github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
|
||||
@@ -76,21 +103,37 @@ github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
|
||||
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
|
||||
github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
|
||||
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
|
||||
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
@@ -101,6 +144,12 @@ github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY=
|
||||
github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw=
|
||||
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
|
||||
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
|
||||
github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
|
||||
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
|
||||
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
|
||||
github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
|
||||
github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
|
||||
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
|
||||
@@ -110,8 +159,11 @@ github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/
|
||||
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
@@ -124,6 +176,9 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
|
||||
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/microsoft/go-mssqldb v1.8.2/go.mod h1:vp38dT33FGfVotRiTmDo3bFyaHq+p3LektQrjTULowo=
|
||||
github.com/microsoft/go-mssqldb v1.9.5 h1:orwya0X/5bsL1o+KasupTkk2eNTNFkTQG0BEe/HxCn0=
|
||||
github.com/microsoft/go-mssqldb v1.9.5/go.mod h1:VCP2a0KEZZtGLRHd1PsLavLFYy/3xX2yJUPycv3Sr2Q=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
|
||||
@@ -142,6 +197,10 @@ github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/mochi-mqtt/server/v2 v2.7.9 h1:y0g4vrSLAag7T07l2oCzOa/+nKVLoazKEWAArwqBNYI=
|
||||
github.com/mochi-mqtt/server/v2 v2.7.9/go.mod h1:lZD3j35AVNqJL5cezlnSkuG05c0FCHSsfAKSPBOSbqc=
|
||||
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
|
||||
github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
|
||||
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
|
||||
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
@@ -162,6 +221,10 @@ github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0
|
||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
@@ -182,6 +245,8 @@ github.com/redis/go-redis/v9 v9.17.1 h1:7tl732FjYPRT9H9aNfyTwKg9iTETjWjGKEJ2t/5i
|
||||
github.com/redis/go-redis/v9 v9.17.1/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
|
||||
@@ -190,6 +255,8 @@ github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDc
|
||||
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
|
||||
github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs=
|
||||
github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c=
|
||||
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
|
||||
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
|
||||
@@ -203,10 +270,18 @@ github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3A
|
||||
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
|
||||
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
@@ -228,6 +303,10 @@ github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+F
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo=
|
||||
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs=
|
||||
github.com/uptrace/bun/dialect/mssqldialect v1.2.16 h1:rKv0cKPNBviXadB/+2Y/UedA/c1JnwGzUWZkdN5FdSQ=
|
||||
github.com/uptrace/bun/dialect/mssqldialect v1.2.16/go.mod h1:J5U7tGKWDsx2Q7MwDZF2417jCdpD6yD/ZMFJcCR80bk=
|
||||
github.com/uptrace/bun/dialect/pgdialect v1.2.16 h1:KFNZ0LxAyczKNfK/IJWMyaleO6eI9/Z5tUv3DE1NVL4=
|
||||
github.com/uptrace/bun/dialect/pgdialect v1.2.16/go.mod h1:IJdMeV4sLfh0LDUZl7TIxLI0LipF1vwTK3hBC7p5qLo=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.2.16 h1:6wVAiYLj1pMibRthGwy4wDLa3D5AQo32Y8rvwPd8CQ0=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.2.16/go.mod h1:Z7+5qK8CGZkDQiPMu+LSdVuDuR1I5jcwtkB1Pi3F82E=
|
||||
github.com/uptrace/bun/driver/sqliteshim v1.2.16 h1:M6Dh5kkDWFbUWBrOsIE1g1zdZ5JbSytTD4piFRBOUAI=
|
||||
@@ -240,8 +319,19 @@ github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAh
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
|
||||
github.com/warkanum/bun v1.2.17 h1:HP8eTuKSNcqMDhhIPFxEbgV/yct6RR0/c3qHH3PNZUA=
|
||||
github.com/warkanum/bun v1.2.17/go.mod h1:jMoNg2n56ckaawi/O/J92BHaECmrz6IRjuMWqlMaMTM=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
|
||||
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
|
||||
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
|
||||
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss=
|
||||
go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
|
||||
@@ -274,33 +364,127 @@ go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
|
||||
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 h1:zfMcR1Cs4KNuomFFgGefv5N0czO2XZpUbxGUy8i8ug0=
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
||||
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
|
||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
|
||||
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
|
||||
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
|
||||
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
|
||||
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
|
||||
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
|
||||
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
@@ -315,6 +499,10 @@ google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXn
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
@@ -322,6 +510,8 @@ gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
|
||||
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
|
||||
gorm.io/driver/sqlite v1.6.0 h1:WHRRrIiulaPiPFmDcod6prc4l2VGVWHz80KspNsxSfQ=
|
||||
gorm.io/driver/sqlite v1.6.0/go.mod h1:AO9V1qIQddBESngQUKWL9yoH93HIeA1X6V633rBwyT8=
|
||||
gorm.io/driver/sqlserver v1.6.3 h1:UR+nWCuphPnq7UxnL57PSrlYjuvs+sf1N59GgFX7uAI=
|
||||
gorm.io/driver/sqlserver v1.6.3/go.mod h1:VZeNn7hqX1aXoN5TPAFGWvxWG90xtA8erGn2gQmpc6U=
|
||||
gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs=
|
||||
gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
|
||||
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
|
||||
|
||||
@@ -1,7 +1,16 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"strings"
|
||||
|
||||
"github.com/uptrace/bun/dialect/mssqldialect"
|
||||
"github.com/uptrace/bun/dialect/pgdialect"
|
||||
"github.com/uptrace/bun/dialect/sqlitedialect"
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/driver/sqlite"
|
||||
"gorm.io/driver/sqlserver"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// parseTableName splits a table name that may contain schema into separate schema and table
|
||||
@@ -14,3 +23,39 @@ func parseTableName(fullTableName string) (schema, table string) {
|
||||
}
|
||||
return "", fullTableName
|
||||
}
|
||||
|
||||
// GetPostgresDialect returns a Bun PostgreSQL dialect
|
||||
func GetPostgresDialect() *pgdialect.Dialect {
|
||||
return pgdialect.New()
|
||||
}
|
||||
|
||||
// GetSQLiteDialect returns a Bun SQLite dialect
|
||||
func GetSQLiteDialect() *sqlitedialect.Dialect {
|
||||
return sqlitedialect.New()
|
||||
}
|
||||
|
||||
// GetMSSQLDialect returns a Bun MSSQL dialect
|
||||
func GetMSSQLDialect() *mssqldialect.Dialect {
|
||||
return mssqldialect.New()
|
||||
}
|
||||
|
||||
// GetPostgresDialector returns a GORM PostgreSQL dialector
|
||||
func GetPostgresDialector(db *sql.DB) gorm.Dialector {
|
||||
return postgres.New(postgres.Config{
|
||||
Conn: db,
|
||||
})
|
||||
}
|
||||
|
||||
// GetSQLiteDialector returns a GORM SQLite dialector
|
||||
func GetSQLiteDialector(db *sql.DB) gorm.Dialector {
|
||||
return sqlite.Dialector{
|
||||
Conn: db,
|
||||
}
|
||||
}
|
||||
|
||||
// GetMSSQLDialector returns a GORM MSSQL dialector
|
||||
func GetMSSQLDialector(db *sql.DB) gorm.Dialector {
|
||||
return sqlserver.New(sqlserver.Config{
|
||||
Conn: db,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -234,35 +234,52 @@ func stripOuterParentheses(s string) string {
|
||||
s = strings.TrimSpace(s)
|
||||
|
||||
for {
|
||||
if len(s) < 2 || s[0] != '(' || s[len(s)-1] != ')' {
|
||||
stripped, wasStripped := stripOneMatchingOuterParen(s)
|
||||
if !wasStripped {
|
||||
return s
|
||||
}
|
||||
s = stripped
|
||||
}
|
||||
}
|
||||
|
||||
// Check if these parentheses match (i.e., they're the outermost pair)
|
||||
depth := 0
|
||||
matched := false
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '(':
|
||||
depth++
|
||||
case ')':
|
||||
depth--
|
||||
if depth == 0 && i == len(s)-1 {
|
||||
matched = true
|
||||
} else if depth == 0 {
|
||||
// Found a closing paren before the end, so outer parens don't match
|
||||
return s
|
||||
}
|
||||
// stripOneOuterParentheses removes only one level of matching outer parentheses from a string
|
||||
// Unlike stripOuterParentheses, this only strips once, preserving nested parentheses
|
||||
func stripOneOuterParentheses(s string) string {
|
||||
stripped, _ := stripOneMatchingOuterParen(strings.TrimSpace(s))
|
||||
return stripped
|
||||
}
|
||||
|
||||
// stripOneMatchingOuterParen is a helper that strips one matching pair of outer parentheses
|
||||
// Returns the stripped string and a boolean indicating if stripping occurred
|
||||
func stripOneMatchingOuterParen(s string) (string, bool) {
|
||||
if len(s) < 2 || s[0] != '(' || s[len(s)-1] != ')' {
|
||||
return s, false
|
||||
}
|
||||
|
||||
// Check if these parentheses match (i.e., they're the outermost pair)
|
||||
depth := 0
|
||||
matched := false
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '(':
|
||||
depth++
|
||||
case ')':
|
||||
depth--
|
||||
if depth == 0 && i == len(s)-1 {
|
||||
matched = true
|
||||
} else if depth == 0 {
|
||||
// Found a closing paren before the end, so outer parens don't match
|
||||
return s, false
|
||||
}
|
||||
}
|
||||
|
||||
if !matched {
|
||||
return s
|
||||
}
|
||||
|
||||
// Strip the outer parentheses and continue
|
||||
s = strings.TrimSpace(s[1 : len(s)-1])
|
||||
}
|
||||
|
||||
if !matched {
|
||||
return s, false
|
||||
}
|
||||
|
||||
// Strip the outer parentheses
|
||||
return strings.TrimSpace(s[1 : len(s)-1]), true
|
||||
}
|
||||
|
||||
// splitByAND splits a WHERE clause by AND operators (case-insensitive)
|
||||
@@ -683,8 +700,8 @@ func AddTablePrefixToColumns(where string, tableName string) string {
|
||||
// - No valid column reference is found
|
||||
// - The column doesn't exist in the table (when validColumns is provided)
|
||||
func addPrefixToSingleCondition(cond string, tableName string, validColumns map[string]bool) string {
|
||||
// Strip outer grouping parentheses to get to the actual condition
|
||||
strippedCond := stripOuterParentheses(cond)
|
||||
// Strip one level of outer grouping parentheses to get to the actual condition
|
||||
strippedCond := stripOneOuterParentheses(cond)
|
||||
|
||||
// Skip SQL literals and trivial conditions (true, false, null, 1=1, etc.)
|
||||
if IsSQLExpression(strippedCond) || IsTrivialCondition(strippedCond) {
|
||||
@@ -692,6 +709,34 @@ func addPrefixToSingleCondition(cond string, tableName string, validColumns map[
|
||||
return cond
|
||||
}
|
||||
|
||||
// After stripping outer parentheses, check if there are multiple AND-separated conditions
|
||||
// at the top level. If so, split and process each separately to avoid incorrectly
|
||||
// treating "true AND status" as a single column name.
|
||||
subConditions := splitByAND(strippedCond)
|
||||
if len(subConditions) > 1 {
|
||||
// Multiple conditions found - process each separately
|
||||
logger.Debug("Found %d sub-conditions after stripping parentheses, processing separately", len(subConditions))
|
||||
processedConditions := make([]string, 0, len(subConditions))
|
||||
for _, subCond := range subConditions {
|
||||
// Recursively process each sub-condition
|
||||
processed := addPrefixToSingleCondition(subCond, tableName, validColumns)
|
||||
processedConditions = append(processedConditions, processed)
|
||||
}
|
||||
result := strings.Join(processedConditions, " AND ")
|
||||
// Preserve original outer parentheses if they existed
|
||||
if cond != strippedCond {
|
||||
result = "(" + result + ")"
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// If we stripped parentheses and still have more parentheses, recursively process
|
||||
if cond != strippedCond && strings.HasPrefix(strippedCond, "(") && strings.HasSuffix(strippedCond, ")") {
|
||||
// Recursively handle nested parentheses
|
||||
processed := addPrefixToSingleCondition(strippedCond, tableName, validColumns)
|
||||
return "(" + processed + ")"
|
||||
}
|
||||
|
||||
// Extract the left side of the comparison (before the operator)
|
||||
columnRef := extractLeftSideOfComparison(strippedCond)
|
||||
if columnRef == "" {
|
||||
|
||||
@@ -658,3 +658,76 @@ func TestSanitizeWhereClauseWithModel(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddTablePrefixToColumns_ComplexConditions(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
where string
|
||||
tableName string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "Parentheses with true AND condition - should not prefix true",
|
||||
where: "(true AND status = 'active')",
|
||||
tableName: "mastertask",
|
||||
expected: "(true AND mastertask.status = 'active')",
|
||||
},
|
||||
{
|
||||
name: "Parentheses with multiple conditions including true",
|
||||
where: "(true AND status = 'active' AND id > 5)",
|
||||
tableName: "mastertask",
|
||||
expected: "(true AND mastertask.status = 'active' AND mastertask.id > 5)",
|
||||
},
|
||||
{
|
||||
name: "Nested parentheses with true",
|
||||
where: "((true AND status = 'active'))",
|
||||
tableName: "mastertask",
|
||||
expected: "((true AND mastertask.status = 'active'))",
|
||||
},
|
||||
{
|
||||
name: "Mixed: false AND valid conditions",
|
||||
where: "(false AND name = 'test')",
|
||||
tableName: "mastertask",
|
||||
expected: "(false AND mastertask.name = 'test')",
|
||||
},
|
||||
{
|
||||
name: "Mixed: null AND valid conditions",
|
||||
where: "(null AND status = 'active')",
|
||||
tableName: "mastertask",
|
||||
expected: "(null AND mastertask.status = 'active')",
|
||||
},
|
||||
{
|
||||
name: "Multiple true conditions in parentheses",
|
||||
where: "(true AND true AND status = 'active')",
|
||||
tableName: "mastertask",
|
||||
expected: "(true AND true AND mastertask.status = 'active')",
|
||||
},
|
||||
{
|
||||
name: "Simple true without parens - should not prefix",
|
||||
where: "true",
|
||||
tableName: "mastertask",
|
||||
expected: "true",
|
||||
},
|
||||
{
|
||||
name: "Simple condition without parens - should prefix",
|
||||
where: "status = 'active'",
|
||||
tableName: "mastertask",
|
||||
expected: "mastertask.status = 'active'",
|
||||
},
|
||||
{
|
||||
name: "Unregistered table with true - should not prefix true",
|
||||
where: "(true AND status = 'active')",
|
||||
tableName: "unregistered_table",
|
||||
expected: "(true AND unregistered_table.status = 'active')",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := AddTablePrefixToColumns(tt.where, tt.tableName)
|
||||
if result != tt.expected {
|
||||
t.Errorf("AddTablePrefixToColumns(%q, %q) = %q; want %q", tt.where, tt.tableName, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,8 +11,8 @@ type Config struct {
|
||||
ErrorTracking ErrorTrackingConfig `mapstructure:"error_tracking"`
|
||||
Middleware MiddlewareConfig `mapstructure:"middleware"`
|
||||
CORS CORSConfig `mapstructure:"cors"`
|
||||
Database DatabaseConfig `mapstructure:"database"`
|
||||
EventBroker EventBrokerConfig `mapstructure:"event_broker"`
|
||||
DBManager DBManagerConfig `mapstructure:"dbmanager"`
|
||||
}
|
||||
|
||||
// ServerConfig holds server-related configuration
|
||||
@@ -76,11 +76,6 @@ type CORSConfig struct {
|
||||
MaxAge int `mapstructure:"max_age"`
|
||||
}
|
||||
|
||||
// DatabaseConfig holds database configuration (primarily for testing)
|
||||
type DatabaseConfig struct {
|
||||
URL string `mapstructure:"url"`
|
||||
}
|
||||
|
||||
// ErrorTrackingConfig holds error tracking configuration
|
||||
type ErrorTrackingConfig struct {
|
||||
Enabled bool `mapstructure:"enabled"`
|
||||
|
||||
107
pkg/config/dbmanager.go
Normal file
107
pkg/config/dbmanager.go
Normal file
@@ -0,0 +1,107 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DBManagerConfig contains configuration for the database connection manager
|
||||
type DBManagerConfig struct {
|
||||
// DefaultConnection is the name of the default connection to use
|
||||
DefaultConnection string `mapstructure:"default_connection"`
|
||||
|
||||
// Connections is a map of connection name to connection configuration
|
||||
Connections map[string]DBConnectionConfig `mapstructure:"connections"`
|
||||
|
||||
// Global connection pool defaults
|
||||
MaxOpenConns int `mapstructure:"max_open_conns"`
|
||||
MaxIdleConns int `mapstructure:"max_idle_conns"`
|
||||
ConnMaxLifetime time.Duration `mapstructure:"conn_max_lifetime"`
|
||||
ConnMaxIdleTime time.Duration `mapstructure:"conn_max_idle_time"`
|
||||
|
||||
// Retry policy
|
||||
RetryAttempts int `mapstructure:"retry_attempts"`
|
||||
RetryDelay time.Duration `mapstructure:"retry_delay"`
|
||||
RetryMaxDelay time.Duration `mapstructure:"retry_max_delay"`
|
||||
|
||||
// Health checks
|
||||
HealthCheckInterval time.Duration `mapstructure:"health_check_interval"`
|
||||
EnableAutoReconnect bool `mapstructure:"enable_auto_reconnect"`
|
||||
}
|
||||
|
||||
// DBConnectionConfig defines configuration for a single database connection
|
||||
type DBConnectionConfig struct {
|
||||
// Name is the unique name of this connection
|
||||
Name string `mapstructure:"name"`
|
||||
|
||||
// Type is the database type (postgres, sqlite, mssql, mongodb)
|
||||
Type string `mapstructure:"type"`
|
||||
|
||||
// DSN is the complete Data Source Name / connection string
|
||||
// If provided, this takes precedence over individual connection parameters
|
||||
DSN string `mapstructure:"dsn"`
|
||||
|
||||
// Connection parameters (used if DSN is not provided)
|
||||
Host string `mapstructure:"host"`
|
||||
Port int `mapstructure:"port"`
|
||||
User string `mapstructure:"user"`
|
||||
Password string `mapstructure:"password"`
|
||||
Database string `mapstructure:"database"`
|
||||
|
||||
// PostgreSQL/MSSQL specific
|
||||
SSLMode string `mapstructure:"sslmode"` // disable, require, verify-ca, verify-full
|
||||
Schema string `mapstructure:"schema"` // Default schema
|
||||
|
||||
// SQLite specific
|
||||
FilePath string `mapstructure:"filepath"`
|
||||
|
||||
// MongoDB specific
|
||||
AuthSource string `mapstructure:"auth_source"`
|
||||
ReplicaSet string `mapstructure:"replica_set"`
|
||||
ReadPreference string `mapstructure:"read_preference"` // primary, secondary, etc.
|
||||
|
||||
// Connection pool settings (overrides global defaults)
|
||||
MaxOpenConns *int `mapstructure:"max_open_conns"`
|
||||
MaxIdleConns *int `mapstructure:"max_idle_conns"`
|
||||
ConnMaxLifetime *time.Duration `mapstructure:"conn_max_lifetime"`
|
||||
ConnMaxIdleTime *time.Duration `mapstructure:"conn_max_idle_time"`
|
||||
|
||||
// Timeouts
|
||||
ConnectTimeout time.Duration `mapstructure:"connect_timeout"`
|
||||
QueryTimeout time.Duration `mapstructure:"query_timeout"`
|
||||
|
||||
// Features
|
||||
EnableTracing bool `mapstructure:"enable_tracing"`
|
||||
EnableMetrics bool `mapstructure:"enable_metrics"`
|
||||
EnableLogging bool `mapstructure:"enable_logging"`
|
||||
|
||||
// DefaultORM specifies which ORM to use for the Database() method
|
||||
// Options: "bun", "gorm", "native"
|
||||
DefaultORM string `mapstructure:"default_orm"`
|
||||
|
||||
// Tags for organization and filtering
|
||||
Tags map[string]string `mapstructure:"tags"`
|
||||
}
|
||||
|
||||
// ToManagerConfig converts config.DBManagerConfig to dbmanager.ManagerConfig
|
||||
// This is used to avoid circular dependencies
|
||||
func (c *DBManagerConfig) ToManagerConfig() interface{} {
|
||||
// This will be implemented in the dbmanager package
|
||||
// to convert from config types to dbmanager types
|
||||
return c
|
||||
}
|
||||
|
||||
// Validate validates the DBManager configuration
|
||||
func (c *DBManagerConfig) Validate() error {
|
||||
if len(c.Connections) == 0 {
|
||||
return fmt.Errorf("at least one connection must be configured")
|
||||
}
|
||||
|
||||
if c.DefaultConnection != "" {
|
||||
if _, ok := c.Connections[c.DefaultConnection]; !ok {
|
||||
return fmt.Errorf("default connection '%s' not found in connections", c.DefaultConnection)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
531
pkg/dbmanager/README.md
Normal file
531
pkg/dbmanager/README.md
Normal file
@@ -0,0 +1,531 @@
|
||||
# Database Connection Manager (dbmanager)
|
||||
|
||||
A comprehensive database connection manager for Go that provides centralized management of multiple named database connections with support for PostgreSQL, SQLite, MSSQL, and MongoDB.
|
||||
|
||||
## Features
|
||||
|
||||
- **Multiple Named Connections**: Manage multiple database connections with names like `primary`, `analytics`, `cache-db`
|
||||
- **Multi-Database Support**: PostgreSQL, SQLite, Microsoft SQL Server, and MongoDB
|
||||
- **Multi-ORM Access**: Each SQL connection provides access through:
|
||||
- **Bun ORM** - Modern, lightweight ORM
|
||||
- **GORM** - Popular Go ORM
|
||||
- **Native** - Standard library `*sql.DB`
|
||||
- All three share the same underlying connection pool
|
||||
- **Configuration-Driven**: YAML configuration with Viper integration
|
||||
- **Production-Ready Features**:
|
||||
- Automatic health checks and reconnection
|
||||
- Prometheus metrics
|
||||
- Connection pooling with configurable limits
|
||||
- Retry logic with exponential backoff
|
||||
- Graceful shutdown
|
||||
- OpenTelemetry tracing support
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
go get github.com/bitechdev/ResolveSpec/pkg/dbmanager
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Configuration
|
||||
|
||||
Create a configuration file (e.g., `config.yaml`):
|
||||
|
||||
```yaml
|
||||
dbmanager:
|
||||
default_connection: "primary"
|
||||
|
||||
# Global connection pool defaults
|
||||
max_open_conns: 25
|
||||
max_idle_conns: 5
|
||||
conn_max_lifetime: 30m
|
||||
conn_max_idle_time: 5m
|
||||
|
||||
# Retry configuration
|
||||
retry_attempts: 3
|
||||
retry_delay: 1s
|
||||
retry_max_delay: 10s
|
||||
|
||||
# Health checks
|
||||
health_check_interval: 30s
|
||||
enable_auto_reconnect: true
|
||||
|
||||
connections:
|
||||
# Primary PostgreSQL connection
|
||||
primary:
|
||||
type: postgres
|
||||
host: localhost
|
||||
port: 5432
|
||||
user: myuser
|
||||
password: mypassword
|
||||
database: myapp
|
||||
sslmode: disable
|
||||
default_orm: bun
|
||||
enable_metrics: true
|
||||
enable_tracing: true
|
||||
enable_logging: true
|
||||
|
||||
# Read replica for analytics
|
||||
analytics:
|
||||
type: postgres
|
||||
dsn: "postgres://readonly:pass@analytics:5432/analytics"
|
||||
default_orm: bun
|
||||
enable_metrics: true
|
||||
|
||||
# SQLite cache
|
||||
cache-db:
|
||||
type: sqlite
|
||||
filepath: /var/lib/app/cache.db
|
||||
max_open_conns: 1
|
||||
|
||||
# MongoDB for documents
|
||||
documents:
|
||||
type: mongodb
|
||||
host: localhost
|
||||
port: 27017
|
||||
database: documents
|
||||
user: mongouser
|
||||
password: mongopass
|
||||
auth_source: admin
|
||||
enable_metrics: true
|
||||
```
|
||||
|
||||
### 2. Initialize Manager
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/config"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/dbmanager"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Load configuration
|
||||
cfgMgr := config.NewManager()
|
||||
if err := cfgMgr.Load(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
cfg, _ := cfgMgr.GetConfig()
|
||||
|
||||
// Create database manager
|
||||
mgr, err := dbmanager.NewManager(cfg.DBManager)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer mgr.Close()
|
||||
|
||||
// Connect all databases
|
||||
ctx := context.Background()
|
||||
if err := mgr.Connect(ctx); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Your application code here...
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Use Database Connections
|
||||
|
||||
#### Get Default Database
|
||||
|
||||
```go
|
||||
// Get the default database (as configured common.Database interface)
|
||||
db, err := mgr.GetDefaultDatabase()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Use it with any query
|
||||
var users []User
|
||||
err = db.NewSelect().
|
||||
Model(&users).
|
||||
Where("active = ?", true).
|
||||
Scan(ctx, &users)
|
||||
```
|
||||
|
||||
#### Get Named Connection with Specific ORM
|
||||
|
||||
```go
|
||||
// Get primary connection
|
||||
primary, err := mgr.Get("primary")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Use with Bun
|
||||
bunDB, err := primary.Bun()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = bunDB.NewSelect().Model(&users).Scan(ctx)
|
||||
|
||||
// Use with GORM (same underlying connection!)
|
||||
gormDB, err := primary.GORM()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
gormDB.Where("active = ?", true).Find(&users)
|
||||
|
||||
// Use native *sql.DB
|
||||
nativeDB, err := primary.Native()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
rows, err := nativeDB.QueryContext(ctx, "SELECT * FROM users WHERE active = $1", true)
|
||||
```
|
||||
|
||||
#### Use MongoDB
|
||||
|
||||
```go
|
||||
// Get MongoDB connection
|
||||
docs, err := mgr.Get("documents")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
mongoClient, err := docs.MongoDB()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
collection := mongoClient.Database("documents").Collection("articles")
|
||||
// Use MongoDB driver...
|
||||
```
|
||||
|
||||
#### Change Default Database
|
||||
|
||||
```go
|
||||
// Switch to analytics database as default
|
||||
err := mgr.SetDefaultDatabase("analytics")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Now GetDefaultDatabase() returns the analytics connection
|
||||
db, _ := mgr.GetDefaultDatabase()
|
||||
```
|
||||
|
||||
## Configuration Reference
|
||||
|
||||
### Manager Configuration
|
||||
|
||||
| Field | Type | Default | Description |
|
||||
|-------|------|---------|-------------|
|
||||
| `default_connection` | string | "" | Name of the default connection |
|
||||
| `connections` | map | {} | Map of connection name to ConnectionConfig |
|
||||
| `max_open_conns` | int | 25 | Global default for max open connections |
|
||||
| `max_idle_conns` | int | 5 | Global default for max idle connections |
|
||||
| `conn_max_lifetime` | duration | 30m | Global default for connection max lifetime |
|
||||
| `conn_max_idle_time` | duration | 5m | Global default for connection max idle time |
|
||||
| `retry_attempts` | int | 3 | Number of connection retry attempts |
|
||||
| `retry_delay` | duration | 1s | Initial retry delay |
|
||||
| `retry_max_delay` | duration | 10s | Maximum retry delay |
|
||||
| `health_check_interval` | duration | 30s | Interval between health checks |
|
||||
| `enable_auto_reconnect` | bool | true | Auto-reconnect on health check failure |
|
||||
|
||||
### Connection Configuration
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `name` | string | Unique connection name |
|
||||
| `type` | string | Database type: `postgres`, `sqlite`, `mssql`, `mongodb` |
|
||||
| `dsn` | string | Complete connection string (overrides individual params) |
|
||||
| `host` | string | Database host |
|
||||
| `port` | int | Database port |
|
||||
| `user` | string | Username |
|
||||
| `password` | string | Password |
|
||||
| `database` | string | Database name |
|
||||
| `sslmode` | string | SSL mode (postgres/mssql): `disable`, `require`, etc. |
|
||||
| `schema` | string | Default schema (postgres/mssql) |
|
||||
| `filepath` | string | File path (sqlite only) |
|
||||
| `auth_source` | string | Auth source (mongodb) |
|
||||
| `replica_set` | string | Replica set name (mongodb) |
|
||||
| `read_preference` | string | Read preference (mongodb): `primary`, `secondary`, etc. |
|
||||
| `max_open_conns` | int | Override global max open connections |
|
||||
| `max_idle_conns` | int | Override global max idle connections |
|
||||
| `conn_max_lifetime` | duration | Override global connection max lifetime |
|
||||
| `conn_max_idle_time` | duration | Override global connection max idle time |
|
||||
| `connect_timeout` | duration | Connection timeout (default: 10s) |
|
||||
| `query_timeout` | duration | Query timeout (default: 30s) |
|
||||
| `enable_tracing` | bool | Enable OpenTelemetry tracing |
|
||||
| `enable_metrics` | bool | Enable Prometheus metrics |
|
||||
| `enable_logging` | bool | Enable connection logging |
|
||||
| `default_orm` | string | Default ORM for Database(): `bun`, `gorm`, `native` |
|
||||
| `tags` | map[string]string | Custom tags for filtering/organization |
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Health Checks
|
||||
|
||||
```go
|
||||
// Manual health check
|
||||
if err := mgr.HealthCheck(ctx); err != nil {
|
||||
log.Printf("Health check failed: %v", err)
|
||||
}
|
||||
|
||||
// Per-connection health check
|
||||
primary, _ := mgr.Get("primary")
|
||||
if err := primary.HealthCheck(ctx); err != nil {
|
||||
log.Printf("Primary connection unhealthy: %v", err)
|
||||
|
||||
// Manual reconnect
|
||||
if err := primary.Reconnect(ctx); err != nil {
|
||||
log.Printf("Reconnection failed: %v", err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Connection Statistics
|
||||
|
||||
```go
|
||||
// Get overall statistics
|
||||
stats := mgr.Stats()
|
||||
fmt.Printf("Total connections: %d\n", stats.TotalConnections)
|
||||
fmt.Printf("Healthy: %d, Unhealthy: %d\n", stats.HealthyCount, stats.UnhealthyCount)
|
||||
|
||||
// Per-connection stats
|
||||
for name, connStats := range stats.ConnectionStats {
|
||||
fmt.Printf("%s: %d open, %d in use, %d idle\n",
|
||||
name,
|
||||
connStats.OpenConnections,
|
||||
connStats.InUse,
|
||||
connStats.Idle)
|
||||
}
|
||||
|
||||
// Individual connection stats
|
||||
primary, _ := mgr.Get("primary")
|
||||
stats := primary.Stats()
|
||||
fmt.Printf("Wait count: %d, Wait duration: %v\n",
|
||||
stats.WaitCount,
|
||||
stats.WaitDuration)
|
||||
```
|
||||
|
||||
### Prometheus Metrics
|
||||
|
||||
The package automatically exports Prometheus metrics:
|
||||
|
||||
- `dbmanager_connections_total` - Total configured connections by type
|
||||
- `dbmanager_connection_status` - Connection health status (1=healthy, 0=unhealthy)
|
||||
- `dbmanager_connection_pool_size` - Connection pool statistics by state
|
||||
- `dbmanager_connection_wait_count` - Times connections waited for availability
|
||||
- `dbmanager_connection_wait_duration_seconds` - Total wait duration
|
||||
- `dbmanager_health_check_duration_seconds` - Health check execution time
|
||||
- `dbmanager_reconnect_attempts_total` - Reconnection attempts and results
|
||||
- `dbmanager_connection_lifetime_closed_total` - Connections closed due to max lifetime
|
||||
- `dbmanager_connection_idle_closed_total` - Connections closed due to max idle time
|
||||
|
||||
Metrics are automatically updated during health checks. To manually publish metrics:
|
||||
|
||||
```go
|
||||
if mgr, ok := mgr.(*connectionManager); ok {
|
||||
mgr.PublishMetrics()
|
||||
}
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
### Single Connection Pool, Multiple ORMs
|
||||
|
||||
A key design principle is that Bun, GORM, and Native all wrap the **same underlying `*sql.DB`** connection pool:
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────┐
|
||||
│ SQL Connection │
|
||||
├─────────────────────────────────────┤
|
||||
│ ┌─────────┐ ┌──────┐ ┌────────┐ │
|
||||
│ │ Bun │ │ GORM │ │ Native │ │
|
||||
│ └────┬────┘ └───┬──┘ └───┬────┘ │
|
||||
│ │ │ │ │
|
||||
│ └───────────┴─────────┘ │
|
||||
│ *sql.DB │
|
||||
│ (single pool) │
|
||||
└─────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Benefits:**
|
||||
- No connection duplication
|
||||
- Consistent pool limits across all ORMs
|
||||
- Unified connection statistics
|
||||
- Lower resource usage
|
||||
|
||||
### Provider Pattern
|
||||
|
||||
Each database type has a dedicated provider:
|
||||
|
||||
- **PostgresProvider** - Uses `pgx` driver
|
||||
- **SQLiteProvider** - Uses `glebarez/sqlite` (pure Go)
|
||||
- **MSSQLProvider** - Uses `go-mssqldb`
|
||||
- **MongoProvider** - Uses official `mongo-driver`
|
||||
|
||||
Providers handle:
|
||||
- Connection establishment with retry logic
|
||||
- Health checking
|
||||
- Connection statistics
|
||||
- Connection cleanup
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Use Named Connections**: Be explicit about which database you're accessing
|
||||
```go
|
||||
primary, _ := mgr.Get("primary") // Good
|
||||
db, _ := mgr.GetDefaultDatabase() // Risky if default changes
|
||||
```
|
||||
|
||||
2. **Configure Connection Pools**: Tune based on your workload
|
||||
```yaml
|
||||
connections:
|
||||
primary:
|
||||
max_open_conns: 100 # High traffic API
|
||||
max_idle_conns: 25
|
||||
analytics:
|
||||
max_open_conns: 10 # Background analytics
|
||||
max_idle_conns: 2
|
||||
```
|
||||
|
||||
3. **Enable Health Checks**: Catch connection issues early
|
||||
```yaml
|
||||
health_check_interval: 30s
|
||||
enable_auto_reconnect: true
|
||||
```
|
||||
|
||||
4. **Use Appropriate ORM**: Choose based on your needs
|
||||
- **Bun**: Modern, fast, type-safe - recommended for new code
|
||||
- **GORM**: Mature, feature-rich - good for existing GORM code
|
||||
- **Native**: Maximum control - use for performance-critical queries
|
||||
|
||||
5. **Monitor Metrics**: Watch connection pool utilization
|
||||
- If `wait_count` is high, increase `max_open_conns`
|
||||
- If `idle` is always high, decrease `max_idle_conns`
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Connection Failures
|
||||
|
||||
If connections fail to establish:
|
||||
|
||||
1. Check configuration:
|
||||
```bash
|
||||
# Test connection manually
|
||||
psql -h localhost -U myuser -d myapp
|
||||
```
|
||||
|
||||
2. Enable logging:
|
||||
```yaml
|
||||
connections:
|
||||
primary:
|
||||
enable_logging: true
|
||||
```
|
||||
|
||||
3. Check retry attempts:
|
||||
```yaml
|
||||
retry_attempts: 5 # Increase retries
|
||||
retry_max_delay: 30s
|
||||
```
|
||||
|
||||
### Pool Exhaustion
|
||||
|
||||
If you see "too many connections" errors:
|
||||
|
||||
1. Increase pool size:
|
||||
```yaml
|
||||
max_open_conns: 50 # Increase from default 25
|
||||
```
|
||||
|
||||
2. Reduce connection lifetime:
|
||||
```yaml
|
||||
conn_max_lifetime: 15m # Recycle faster
|
||||
```
|
||||
|
||||
3. Monitor wait stats:
|
||||
```go
|
||||
stats := primary.Stats()
|
||||
if stats.WaitCount > 1000 {
|
||||
log.Warn("High connection wait count")
|
||||
}
|
||||
```
|
||||
|
||||
### MongoDB vs SQL Confusion
|
||||
|
||||
MongoDB connections don't support SQL ORMs:
|
||||
|
||||
```go
|
||||
docs, _ := mgr.Get("documents")
|
||||
|
||||
// ✓ Correct
|
||||
mongoClient, _ := docs.MongoDB()
|
||||
|
||||
// ✗ Error: ErrNotSQLDatabase
|
||||
bunDB, err := docs.Bun() // Won't work!
|
||||
```
|
||||
|
||||
SQL connections don't support MongoDB:
|
||||
|
||||
```go
|
||||
primary, _ := mgr.Get("primary")
|
||||
|
||||
// ✓ Correct
|
||||
bunDB, _ := primary.Bun()
|
||||
|
||||
// ✗ Error: ErrNotMongoDB
|
||||
mongoClient, err := primary.MongoDB() // Won't work!
|
||||
```
|
||||
|
||||
## Migration Guide
|
||||
|
||||
### From Raw `database/sql`
|
||||
|
||||
Before:
|
||||
```go
|
||||
db, err := sql.Open("postgres", dsn)
|
||||
defer db.Close()
|
||||
|
||||
rows, err := db.Query("SELECT * FROM users")
|
||||
```
|
||||
|
||||
After:
|
||||
```go
|
||||
mgr, _ := dbmanager.NewManager(cfg.DBManager)
|
||||
mgr.Connect(ctx)
|
||||
defer mgr.Close()
|
||||
|
||||
primary, _ := mgr.Get("primary")
|
||||
nativeDB, _ := primary.Native()
|
||||
|
||||
rows, err := nativeDB.Query("SELECT * FROM users")
|
||||
```
|
||||
|
||||
### From Direct Bun/GORM
|
||||
|
||||
Before:
|
||||
```go
|
||||
sqldb, _ := sql.Open("pgx", dsn)
|
||||
bunDB := bun.NewDB(sqldb, pgdialect.New())
|
||||
|
||||
var users []User
|
||||
bunDB.NewSelect().Model(&users).Scan(ctx)
|
||||
```
|
||||
|
||||
After:
|
||||
```go
|
||||
mgr, _ := dbmanager.NewManager(cfg.DBManager)
|
||||
mgr.Connect(ctx)
|
||||
|
||||
primary, _ := mgr.Get("primary")
|
||||
bunDB, _ := primary.Bun()
|
||||
|
||||
var users []User
|
||||
bunDB.NewSelect().Model(&users).Scan(ctx)
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
Same as the parent project.
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are welcome! Please submit issues and pull requests to the main repository.
|
||||
448
pkg/dbmanager/config.go
Normal file
448
pkg/dbmanager/config.go
Normal file
@@ -0,0 +1,448 @@
|
||||
package dbmanager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/config"
|
||||
)
|
||||
|
||||
// DatabaseType represents the type of database
|
||||
type DatabaseType string
|
||||
|
||||
const (
|
||||
// DatabaseTypePostgreSQL represents PostgreSQL database
|
||||
DatabaseTypePostgreSQL DatabaseType = "postgres"
|
||||
|
||||
// DatabaseTypeSQLite represents SQLite database
|
||||
DatabaseTypeSQLite DatabaseType = "sqlite"
|
||||
|
||||
// DatabaseTypeMSSQL represents Microsoft SQL Server database
|
||||
DatabaseTypeMSSQL DatabaseType = "mssql"
|
||||
|
||||
// DatabaseTypeMongoDB represents MongoDB database
|
||||
DatabaseTypeMongoDB DatabaseType = "mongodb"
|
||||
)
|
||||
|
||||
// ORMType represents the ORM to use for database operations
|
||||
type ORMType string
|
||||
|
||||
const (
|
||||
// ORMTypeBun represents Bun ORM
|
||||
ORMTypeBun ORMType = "bun"
|
||||
|
||||
// ORMTypeGORM represents GORM
|
||||
ORMTypeGORM ORMType = "gorm"
|
||||
|
||||
// ORMTypeNative represents native database/sql
|
||||
ORMTypeNative ORMType = "native"
|
||||
)
|
||||
|
||||
// ManagerConfig contains configuration for the database connection manager
|
||||
type ManagerConfig struct {
|
||||
// DefaultConnection is the name of the default connection to use
|
||||
DefaultConnection string `mapstructure:"default_connection"`
|
||||
|
||||
// Connections is a map of connection name to connection configuration
|
||||
Connections map[string]ConnectionConfig `mapstructure:"connections"`
|
||||
|
||||
// Global connection pool defaults
|
||||
MaxOpenConns int `mapstructure:"max_open_conns"`
|
||||
MaxIdleConns int `mapstructure:"max_idle_conns"`
|
||||
ConnMaxLifetime time.Duration `mapstructure:"conn_max_lifetime"`
|
||||
ConnMaxIdleTime time.Duration `mapstructure:"conn_max_idle_time"`
|
||||
|
||||
// Retry policy
|
||||
RetryAttempts int `mapstructure:"retry_attempts"`
|
||||
RetryDelay time.Duration `mapstructure:"retry_delay"`
|
||||
RetryMaxDelay time.Duration `mapstructure:"retry_max_delay"`
|
||||
|
||||
// Health checks
|
||||
HealthCheckInterval time.Duration `mapstructure:"health_check_interval"`
|
||||
EnableAutoReconnect bool `mapstructure:"enable_auto_reconnect"`
|
||||
}
|
||||
|
||||
// ConnectionConfig defines configuration for a single database connection.
//
// Either DSN or the individual connection parameters must be supplied;
// Validate enforces this per database type, and BuildDSN assembles a
// connection string from the parameters when DSN is empty.
type ConnectionConfig struct {
	// Name is the unique name of this connection.
	Name string `mapstructure:"name"`

	// Type is the database type (postgres, sqlite, mssql, mongodb).
	Type DatabaseType `mapstructure:"type"`

	// DSN is the complete Data Source Name / connection string.
	// If provided, this takes precedence over individual connection parameters.
	DSN string `mapstructure:"dsn"`

	// Connection parameters (used if DSN is not provided).
	Host     string `mapstructure:"host"`
	Port     int    `mapstructure:"port"`
	User     string `mapstructure:"user"`
	Password string `mapstructure:"password"`
	Database string `mapstructure:"database"`

	// PostgreSQL/MSSQL specific.
	SSLMode string `mapstructure:"sslmode"` // disable, require, verify-ca, verify-full
	Schema  string `mapstructure:"schema"`  // Default schema

	// SQLite specific.
	FilePath string `mapstructure:"filepath"`

	// MongoDB specific.
	AuthSource     string `mapstructure:"auth_source"`
	ReplicaSet     string `mapstructure:"replica_set"`
	ReadPreference string `mapstructure:"read_preference"` // primary, secondary, etc.

	// Connection pool settings. A nil pointer means "inherit the global
	// default" (filled in by ApplyDefaults).
	MaxOpenConns    *int           `mapstructure:"max_open_conns"`
	MaxIdleConns    *int           `mapstructure:"max_idle_conns"`
	ConnMaxLifetime *time.Duration `mapstructure:"conn_max_lifetime"`
	ConnMaxIdleTime *time.Duration `mapstructure:"conn_max_idle_time"`

	// Timeouts. Zero values default to 10s / 30s in ApplyDefaults.
	ConnectTimeout time.Duration `mapstructure:"connect_timeout"`
	QueryTimeout   time.Duration `mapstructure:"query_timeout"`

	// Feature toggles.
	EnableTracing bool `mapstructure:"enable_tracing"`
	EnableMetrics bool `mapstructure:"enable_metrics"`
	EnableLogging bool `mapstructure:"enable_logging"`

	// DefaultORM specifies which ORM to use for the Database() method.
	// Options: "bun", "gorm", "native".
	DefaultORM string `mapstructure:"default_orm"`

	// Tags for organization and filtering.
	Tags map[string]string `mapstructure:"tags"`
}
|
||||
|
||||
// DefaultManagerConfig returns a ManagerConfig with sensible defaults
|
||||
func DefaultManagerConfig() ManagerConfig {
|
||||
return ManagerConfig{
|
||||
DefaultConnection: "",
|
||||
Connections: make(map[string]ConnectionConfig),
|
||||
MaxOpenConns: 25,
|
||||
MaxIdleConns: 5,
|
||||
ConnMaxLifetime: 30 * time.Minute,
|
||||
ConnMaxIdleTime: 5 * time.Minute,
|
||||
RetryAttempts: 3,
|
||||
RetryDelay: 1 * time.Second,
|
||||
RetryMaxDelay: 10 * time.Second,
|
||||
HealthCheckInterval: 30 * time.Second,
|
||||
EnableAutoReconnect: true,
|
||||
}
|
||||
}
|
||||
|
||||
// ApplyDefaults applies default values to the manager configuration
|
||||
func (c *ManagerConfig) ApplyDefaults() {
|
||||
defaults := DefaultManagerConfig()
|
||||
|
||||
if c.MaxOpenConns == 0 {
|
||||
c.MaxOpenConns = defaults.MaxOpenConns
|
||||
}
|
||||
if c.MaxIdleConns == 0 {
|
||||
c.MaxIdleConns = defaults.MaxIdleConns
|
||||
}
|
||||
if c.ConnMaxLifetime == 0 {
|
||||
c.ConnMaxLifetime = defaults.ConnMaxLifetime
|
||||
}
|
||||
if c.ConnMaxIdleTime == 0 {
|
||||
c.ConnMaxIdleTime = defaults.ConnMaxIdleTime
|
||||
}
|
||||
if c.RetryAttempts == 0 {
|
||||
c.RetryAttempts = defaults.RetryAttempts
|
||||
}
|
||||
if c.RetryDelay == 0 {
|
||||
c.RetryDelay = defaults.RetryDelay
|
||||
}
|
||||
if c.RetryMaxDelay == 0 {
|
||||
c.RetryMaxDelay = defaults.RetryMaxDelay
|
||||
}
|
||||
if c.HealthCheckInterval == 0 {
|
||||
c.HealthCheckInterval = defaults.HealthCheckInterval
|
||||
}
|
||||
}
|
||||
|
||||
// Validate validates the manager configuration
|
||||
func (c *ManagerConfig) Validate() error {
|
||||
if len(c.Connections) == 0 {
|
||||
return NewConfigurationError("connections", fmt.Errorf("at least one connection must be configured"))
|
||||
}
|
||||
|
||||
if c.DefaultConnection != "" {
|
||||
if _, ok := c.Connections[c.DefaultConnection]; !ok {
|
||||
return NewConfigurationError("default_connection", fmt.Errorf("default connection '%s' not found in connections", c.DefaultConnection))
|
||||
}
|
||||
}
|
||||
|
||||
// Validate each connection
|
||||
for name := range c.Connections {
|
||||
conn := c.Connections[name]
|
||||
if err := conn.Validate(); err != nil {
|
||||
return fmt.Errorf("connection '%s': %w", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ApplyDefaults applies default values and global settings to the connection configuration
|
||||
func (cc *ConnectionConfig) ApplyDefaults(global *ManagerConfig) {
|
||||
// Set name if not already set
|
||||
if cc.Name == "" {
|
||||
cc.Name = "unnamed"
|
||||
}
|
||||
|
||||
// Apply global pool settings if not overridden
|
||||
if cc.MaxOpenConns == nil && global != nil {
|
||||
maxOpen := global.MaxOpenConns
|
||||
cc.MaxOpenConns = &maxOpen
|
||||
}
|
||||
if cc.MaxIdleConns == nil && global != nil {
|
||||
maxIdle := global.MaxIdleConns
|
||||
cc.MaxIdleConns = &maxIdle
|
||||
}
|
||||
if cc.ConnMaxLifetime == nil && global != nil {
|
||||
lifetime := global.ConnMaxLifetime
|
||||
cc.ConnMaxLifetime = &lifetime
|
||||
}
|
||||
if cc.ConnMaxIdleTime == nil && global != nil {
|
||||
idleTime := global.ConnMaxIdleTime
|
||||
cc.ConnMaxIdleTime = &idleTime
|
||||
}
|
||||
|
||||
// Default timeouts
|
||||
if cc.ConnectTimeout == 0 {
|
||||
cc.ConnectTimeout = 10 * time.Second
|
||||
}
|
||||
if cc.QueryTimeout == 0 {
|
||||
cc.QueryTimeout = 30 * time.Second
|
||||
}
|
||||
|
||||
// Default ORM
|
||||
if cc.DefaultORM == "" {
|
||||
cc.DefaultORM = string(ORMTypeBun)
|
||||
}
|
||||
|
||||
// Default PostgreSQL port
|
||||
if cc.Type == DatabaseTypePostgreSQL && cc.Port == 0 && cc.DSN == "" {
|
||||
cc.Port = 5432
|
||||
}
|
||||
|
||||
// Default MSSQL port
|
||||
if cc.Type == DatabaseTypeMSSQL && cc.Port == 0 && cc.DSN == "" {
|
||||
cc.Port = 1433
|
||||
}
|
||||
|
||||
// Default MongoDB port
|
||||
if cc.Type == DatabaseTypeMongoDB && cc.Port == 0 && cc.DSN == "" {
|
||||
cc.Port = 27017
|
||||
}
|
||||
|
||||
// Default MongoDB auth source
|
||||
if cc.Type == DatabaseTypeMongoDB && cc.AuthSource == "" {
|
||||
cc.AuthSource = "admin"
|
||||
}
|
||||
}
|
||||
|
||||
// Validate validates the connection configuration
|
||||
func (cc *ConnectionConfig) Validate() error {
|
||||
// Validate database type
|
||||
switch cc.Type {
|
||||
case DatabaseTypePostgreSQL, DatabaseTypeSQLite, DatabaseTypeMSSQL, DatabaseTypeMongoDB:
|
||||
// Valid types
|
||||
default:
|
||||
return NewConfigurationError("type", fmt.Errorf("unsupported database type: %s", cc.Type))
|
||||
}
|
||||
|
||||
// Validate that either DSN or connection parameters are provided
|
||||
if cc.DSN == "" {
|
||||
switch cc.Type {
|
||||
case DatabaseTypePostgreSQL, DatabaseTypeMSSQL, DatabaseTypeMongoDB:
|
||||
if cc.Host == "" {
|
||||
return NewConfigurationError("host", fmt.Errorf("host is required when DSN is not provided"))
|
||||
}
|
||||
if cc.Database == "" {
|
||||
return NewConfigurationError("database", fmt.Errorf("database is required when DSN is not provided"))
|
||||
}
|
||||
case DatabaseTypeSQLite:
|
||||
if cc.FilePath == "" {
|
||||
return NewConfigurationError("filepath", fmt.Errorf("filepath is required for SQLite when DSN is not provided"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Validate ORM type
|
||||
if cc.DefaultORM != "" {
|
||||
switch ORMType(cc.DefaultORM) {
|
||||
case ORMTypeBun, ORMTypeGORM, ORMTypeNative:
|
||||
// Valid ORM types
|
||||
default:
|
||||
return NewConfigurationError("default_orm", fmt.Errorf("unsupported ORM type: %s", cc.DefaultORM))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// BuildDSN builds a connection string from individual parameters
|
||||
func (cc *ConnectionConfig) BuildDSN() (string, error) {
|
||||
// If DSN is already provided, use it
|
||||
if cc.DSN != "" {
|
||||
return cc.DSN, nil
|
||||
}
|
||||
|
||||
switch cc.Type {
|
||||
case DatabaseTypePostgreSQL:
|
||||
return cc.buildPostgresDSN(), nil
|
||||
case DatabaseTypeSQLite:
|
||||
return cc.buildSQLiteDSN(), nil
|
||||
case DatabaseTypeMSSQL:
|
||||
return cc.buildMSSQLDSN(), nil
|
||||
case DatabaseTypeMongoDB:
|
||||
return cc.buildMongoDSN(), nil
|
||||
default:
|
||||
return "", fmt.Errorf("cannot build DSN for database type: %s", cc.Type)
|
||||
}
|
||||
}
|
||||
|
||||
func (cc *ConnectionConfig) buildPostgresDSN() string {
|
||||
dsn := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s",
|
||||
cc.Host, cc.Port, cc.User, cc.Password, cc.Database)
|
||||
|
||||
if cc.SSLMode != "" {
|
||||
dsn += fmt.Sprintf(" sslmode=%s", cc.SSLMode)
|
||||
} else {
|
||||
dsn += " sslmode=disable"
|
||||
}
|
||||
|
||||
if cc.Schema != "" {
|
||||
dsn += fmt.Sprintf(" search_path=%s", cc.Schema)
|
||||
}
|
||||
|
||||
return dsn
|
||||
}
|
||||
|
||||
func (cc *ConnectionConfig) buildSQLiteDSN() string {
|
||||
if cc.FilePath != "" {
|
||||
return cc.FilePath
|
||||
}
|
||||
return ":memory:"
|
||||
}
|
||||
|
||||
func (cc *ConnectionConfig) buildMSSQLDSN() string {
|
||||
// Format: sqlserver://username:password@host:port?database=dbname
|
||||
dsn := fmt.Sprintf("sqlserver://%s:%s@%s:%d?database=%s",
|
||||
cc.User, cc.Password, cc.Host, cc.Port, cc.Database)
|
||||
|
||||
if cc.Schema != "" {
|
||||
dsn += fmt.Sprintf("&schema=%s", cc.Schema)
|
||||
}
|
||||
|
||||
return dsn
|
||||
}
|
||||
|
||||
func (cc *ConnectionConfig) buildMongoDSN() string {
|
||||
// Format: mongodb://username:password@host:port/database?authSource=admin
|
||||
var dsn string
|
||||
|
||||
if cc.User != "" && cc.Password != "" {
|
||||
dsn = fmt.Sprintf("mongodb://%s:%s@%s:%d/%s",
|
||||
cc.User, cc.Password, cc.Host, cc.Port, cc.Database)
|
||||
} else {
|
||||
dsn = fmt.Sprintf("mongodb://%s:%d/%s", cc.Host, cc.Port, cc.Database)
|
||||
}
|
||||
|
||||
params := ""
|
||||
if cc.AuthSource != "" {
|
||||
params += fmt.Sprintf("authSource=%s", cc.AuthSource)
|
||||
}
|
||||
if cc.ReplicaSet != "" {
|
||||
if params != "" {
|
||||
params += "&"
|
||||
}
|
||||
params += fmt.Sprintf("replicaSet=%s", cc.ReplicaSet)
|
||||
}
|
||||
if cc.ReadPreference != "" {
|
||||
if params != "" {
|
||||
params += "&"
|
||||
}
|
||||
params += fmt.Sprintf("readPreference=%s", cc.ReadPreference)
|
||||
}
|
||||
|
||||
if params != "" {
|
||||
dsn += "?" + params
|
||||
}
|
||||
|
||||
return dsn
|
||||
}
|
||||
|
||||
// FromConfig converts config.DBManagerConfig to internal ManagerConfig
|
||||
func FromConfig(cfg config.DBManagerConfig) ManagerConfig {
|
||||
mgr := ManagerConfig{
|
||||
DefaultConnection: cfg.DefaultConnection,
|
||||
Connections: make(map[string]ConnectionConfig),
|
||||
MaxOpenConns: cfg.MaxOpenConns,
|
||||
MaxIdleConns: cfg.MaxIdleConns,
|
||||
ConnMaxLifetime: cfg.ConnMaxLifetime,
|
||||
ConnMaxIdleTime: cfg.ConnMaxIdleTime,
|
||||
RetryAttempts: cfg.RetryAttempts,
|
||||
RetryDelay: cfg.RetryDelay,
|
||||
RetryMaxDelay: cfg.RetryMaxDelay,
|
||||
HealthCheckInterval: cfg.HealthCheckInterval,
|
||||
EnableAutoReconnect: cfg.EnableAutoReconnect,
|
||||
}
|
||||
|
||||
// Convert connections
|
||||
for name := range cfg.Connections {
|
||||
connCfg := cfg.Connections[name]
|
||||
mgr.Connections[name] = ConnectionConfig{
|
||||
Name: connCfg.Name,
|
||||
Type: DatabaseType(connCfg.Type),
|
||||
DSN: connCfg.DSN,
|
||||
Host: connCfg.Host,
|
||||
Port: connCfg.Port,
|
||||
User: connCfg.User,
|
||||
Password: connCfg.Password,
|
||||
Database: connCfg.Database,
|
||||
SSLMode: connCfg.SSLMode,
|
||||
Schema: connCfg.Schema,
|
||||
FilePath: connCfg.FilePath,
|
||||
AuthSource: connCfg.AuthSource,
|
||||
ReplicaSet: connCfg.ReplicaSet,
|
||||
ReadPreference: connCfg.ReadPreference,
|
||||
MaxOpenConns: connCfg.MaxOpenConns,
|
||||
MaxIdleConns: connCfg.MaxIdleConns,
|
||||
ConnMaxLifetime: connCfg.ConnMaxLifetime,
|
||||
ConnMaxIdleTime: connCfg.ConnMaxIdleTime,
|
||||
ConnectTimeout: connCfg.ConnectTimeout,
|
||||
QueryTimeout: connCfg.QueryTimeout,
|
||||
EnableTracing: connCfg.EnableTracing,
|
||||
EnableMetrics: connCfg.EnableMetrics,
|
||||
EnableLogging: connCfg.EnableLogging,
|
||||
DefaultORM: connCfg.DefaultORM,
|
||||
Tags: connCfg.Tags,
|
||||
}
|
||||
}
|
||||
|
||||
return mgr
|
||||
}
|
||||
|
||||
// Getter methods to implement providers.ConnectionConfig interface.
// Each accessor simply exposes the corresponding struct field; the Get
// prefix is dictated by that interface.
func (cc *ConnectionConfig) GetName() string                    { return cc.Name }
func (cc *ConnectionConfig) GetType() string                    { return string(cc.Type) }
func (cc *ConnectionConfig) GetHost() string                    { return cc.Host }
func (cc *ConnectionConfig) GetPort() int                       { return cc.Port }
func (cc *ConnectionConfig) GetUser() string                    { return cc.User }
func (cc *ConnectionConfig) GetPassword() string                { return cc.Password }
func (cc *ConnectionConfig) GetDatabase() string                { return cc.Database }
func (cc *ConnectionConfig) GetFilePath() string                { return cc.FilePath }
func (cc *ConnectionConfig) GetConnectTimeout() time.Duration   { return cc.ConnectTimeout }
func (cc *ConnectionConfig) GetEnableLogging() bool             { return cc.EnableLogging }
func (cc *ConnectionConfig) GetMaxOpenConns() *int              { return cc.MaxOpenConns }
func (cc *ConnectionConfig) GetMaxIdleConns() *int              { return cc.MaxIdleConns }
func (cc *ConnectionConfig) GetConnMaxLifetime() *time.Duration { return cc.ConnMaxLifetime }
func (cc *ConnectionConfig) GetConnMaxIdleTime() *time.Duration { return cc.ConnMaxIdleTime }
func (cc *ConnectionConfig) GetQueryTimeout() time.Duration     { return cc.QueryTimeout }
func (cc *ConnectionConfig) GetEnableMetrics() bool             { return cc.EnableMetrics }
func (cc *ConnectionConfig) GetReadPreference() string          { return cc.ReadPreference }
||||
607
pkg/dbmanager/connection.go
Normal file
607
pkg/dbmanager/connection.go
Normal file
@@ -0,0 +1,607 @@
|
||||
package dbmanager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/schema"
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common/adapters/database"
|
||||
)
|
||||
|
||||
// Connection represents a single named database connection.
//
// SQL and MongoDB connections implement the same interface; accessors
// that do not apply to the underlying database type return a sentinel
// error (ErrNotSQLDatabase on MongoDB connections, ErrNotMongoDB on SQL
// connections).
type Connection interface {
	// Metadata
	Name() string
	Type() DatabaseType

	// ORM Access (SQL databases only)
	Bun() (*bun.DB, error)
	GORM() (*gorm.DB, error)
	Native() (*sql.DB, error)

	// Common Database interface (for SQL databases), backed by the ORM
	// named in the connection's DefaultORM setting.
	Database() (common.Database, error)

	// MongoDB Access (MongoDB only)
	MongoDB() (*mongo.Client, error)

	// Lifecycle
	Connect(ctx context.Context) error
	Close() error
	HealthCheck(ctx context.Context) error
	Reconnect(ctx context.Context) error

	// Stats
	Stats() *ConnectionStats
}
|
||||
|
||||
// ConnectionStats contains statistics about a database connection.
type ConnectionStats struct {
	Name              string       // connection name
	Type              DatabaseType // database type
	Connected         bool         // whether the connection is currently open
	LastHealthCheck   time.Time    // when HealthCheck last ran
	HealthCheckStatus string       // "healthy", "disconnected", or "unhealthy: <cause>"

	// SQL connection pool stats (left zero for MongoDB connections).
	OpenConnections   int
	InUse             int
	Idle              int
	WaitCount         int64
	WaitDuration      time.Duration
	MaxIdleClosed     int64
	MaxLifetimeClosed int64
}
|
||||
|
||||
// sqlConnection implements Connection for SQL databases (PostgreSQL,
// SQLite, MSSQL).
//
// All ORM handles (Bun, GORM, native) are created lazily and wrap the
// same underlying *sql.DB obtained from the provider. mu guards the
// lazily-initialized fields, the connected flag, and the health-check
// bookkeeping.
type sqlConnection struct {
	name     string
	dbType   DatabaseType
	config   ConnectionConfig
	provider Provider

	// Lazy-initialized ORM instances (all wrap the same sql.DB).
	nativeDB *sql.DB
	bunDB    *bun.DB
	gormDB   *gorm.DB

	// Adapters for the common.Database interface, also created lazily.
	bunAdapter    *database.BunAdapter
	gormAdapter   *database.GormAdapter
	nativeAdapter common.Database

	// State, guarded by mu.
	connected bool
	mu        sync.RWMutex

	// Health check bookkeeping, guarded by mu.
	lastHealthCheck   time.Time
	healthCheckStatus string
}
|
||||
|
||||
// newSQLConnection creates a new SQL connection
|
||||
func newSQLConnection(name string, dbType DatabaseType, config ConnectionConfig, provider Provider) *sqlConnection {
|
||||
return &sqlConnection{
|
||||
name: name,
|
||||
dbType: dbType,
|
||||
config: config,
|
||||
provider: provider,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the connection name
|
||||
func (c *sqlConnection) Name() string {
|
||||
return c.name
|
||||
}
|
||||
|
||||
// Type returns the database type
|
||||
func (c *sqlConnection) Type() DatabaseType {
|
||||
return c.dbType
|
||||
}
|
||||
|
||||
// Connect establishes the database connection
|
||||
func (c *sqlConnection) Connect(ctx context.Context) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.connected {
|
||||
return ErrAlreadyConnected
|
||||
}
|
||||
|
||||
if err := c.provider.Connect(ctx, &c.config); err != nil {
|
||||
return NewConnectionError(c.name, "connect", err)
|
||||
}
|
||||
|
||||
c.connected = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the database connection and all ORM instances
|
||||
func (c *sqlConnection) Close() error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if !c.connected {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close Bun if initialized
|
||||
if c.bunDB != nil {
|
||||
if err := c.bunDB.Close(); err != nil {
|
||||
return NewConnectionError(c.name, "close bun", err)
|
||||
}
|
||||
}
|
||||
|
||||
// GORM doesn't have a separate close - it uses the underlying sql.DB
|
||||
|
||||
// Close the provider (which closes the underlying sql.DB)
|
||||
if err := c.provider.Close(); err != nil {
|
||||
return NewConnectionError(c.name, "close", err)
|
||||
}
|
||||
|
||||
c.connected = false
|
||||
c.nativeDB = nil
|
||||
c.bunDB = nil
|
||||
c.gormDB = nil
|
||||
c.bunAdapter = nil
|
||||
c.gormAdapter = nil
|
||||
c.nativeAdapter = nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// HealthCheck verifies the connection is alive
|
||||
func (c *sqlConnection) HealthCheck(ctx context.Context) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
c.lastHealthCheck = time.Now()
|
||||
|
||||
if !c.connected {
|
||||
c.healthCheckStatus = "disconnected"
|
||||
return ErrConnectionClosed
|
||||
}
|
||||
|
||||
if err := c.provider.HealthCheck(ctx); err != nil {
|
||||
c.healthCheckStatus = "unhealthy: " + err.Error()
|
||||
return NewConnectionError(c.name, "health check", err)
|
||||
}
|
||||
|
||||
c.healthCheckStatus = "healthy"
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reconnect closes and re-establishes the connection
|
||||
func (c *sqlConnection) Reconnect(ctx context.Context) error {
|
||||
if err := c.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
return c.Connect(ctx)
|
||||
}
|
||||
|
||||
// Native returns the native *sql.DB connection
|
||||
func (c *sqlConnection) Native() (*sql.DB, error) {
|
||||
c.mu.RLock()
|
||||
if c.nativeDB != nil {
|
||||
defer c.mu.RUnlock()
|
||||
return c.nativeDB, nil
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
// Double-check after acquiring write lock
|
||||
if c.nativeDB != nil {
|
||||
return c.nativeDB, nil
|
||||
}
|
||||
|
||||
if !c.connected {
|
||||
return nil, ErrConnectionClosed
|
||||
}
|
||||
|
||||
// Get native connection from provider
|
||||
db, err := c.provider.GetNative()
|
||||
if err != nil {
|
||||
return nil, NewConnectionError(c.name, "get native", err)
|
||||
}
|
||||
|
||||
c.nativeDB = db
|
||||
return c.nativeDB, nil
|
||||
}
|
||||
|
||||
// Bun returns a Bun ORM instance wrapping the native connection
|
||||
func (c *sqlConnection) Bun() (*bun.DB, error) {
|
||||
c.mu.RLock()
|
||||
if c.bunDB != nil {
|
||||
defer c.mu.RUnlock()
|
||||
return c.bunDB, nil
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
// Double-check after acquiring write lock
|
||||
if c.bunDB != nil {
|
||||
return c.bunDB, nil
|
||||
}
|
||||
|
||||
// Get native connection first
|
||||
native, err := c.provider.GetNative()
|
||||
if err != nil {
|
||||
return nil, NewConnectionError(c.name, "get bun", err)
|
||||
}
|
||||
|
||||
// Create Bun DB wrapping the same sql.DB
|
||||
dialect := c.getBunDialect()
|
||||
c.bunDB = bun.NewDB(native, dialect)
|
||||
|
||||
return c.bunDB, nil
|
||||
}
|
||||
|
||||
// GORM returns a GORM instance wrapping the native connection
|
||||
func (c *sqlConnection) GORM() (*gorm.DB, error) {
|
||||
c.mu.RLock()
|
||||
if c.gormDB != nil {
|
||||
defer c.mu.RUnlock()
|
||||
return c.gormDB, nil
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
// Double-check after acquiring write lock
|
||||
if c.gormDB != nil {
|
||||
return c.gormDB, nil
|
||||
}
|
||||
|
||||
// Get native connection first
|
||||
native, err := c.provider.GetNative()
|
||||
if err != nil {
|
||||
return nil, NewConnectionError(c.name, "get gorm", err)
|
||||
}
|
||||
|
||||
// Create GORM DB wrapping the same sql.DB
|
||||
dialector := c.getGORMDialector(native)
|
||||
db, err := gorm.Open(dialector, &gorm.Config{})
|
||||
if err != nil {
|
||||
return nil, NewConnectionError(c.name, "initialize gorm", err)
|
||||
}
|
||||
|
||||
c.gormDB = db
|
||||
return c.gormDB, nil
|
||||
}
|
||||
|
||||
// Database returns the common.Database interface using the configured default ORM
|
||||
func (c *sqlConnection) Database() (common.Database, error) {
|
||||
c.mu.RLock()
|
||||
defaultORM := c.config.DefaultORM
|
||||
c.mu.RUnlock()
|
||||
|
||||
switch ORMType(defaultORM) {
|
||||
case ORMTypeBun:
|
||||
return c.getBunAdapter()
|
||||
case ORMTypeGORM:
|
||||
return c.getGORMAdapter()
|
||||
case ORMTypeNative:
|
||||
return c.getNativeAdapter()
|
||||
default:
|
||||
// Default to Bun
|
||||
return c.getBunAdapter()
|
||||
}
|
||||
}
|
||||
|
||||
// MongoDB returns an error for SQL connections
|
||||
func (c *sqlConnection) MongoDB() (*mongo.Client, error) {
|
||||
return nil, ErrNotMongoDB
|
||||
}
|
||||
|
||||
// Stats returns connection statistics
|
||||
func (c *sqlConnection) Stats() *ConnectionStats {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
stats := &ConnectionStats{
|
||||
Name: c.name,
|
||||
Type: c.dbType,
|
||||
Connected: c.connected,
|
||||
LastHealthCheck: c.lastHealthCheck,
|
||||
HealthCheckStatus: c.healthCheckStatus,
|
||||
}
|
||||
|
||||
// Get SQL stats if connected
|
||||
if c.connected && c.provider != nil {
|
||||
if providerStats := c.provider.Stats(); providerStats != nil {
|
||||
stats.OpenConnections = providerStats.OpenConnections
|
||||
stats.InUse = providerStats.InUse
|
||||
stats.Idle = providerStats.Idle
|
||||
stats.WaitCount = providerStats.WaitCount
|
||||
stats.WaitDuration = providerStats.WaitDuration
|
||||
stats.MaxIdleClosed = providerStats.MaxIdleClosed
|
||||
stats.MaxLifetimeClosed = providerStats.MaxLifetimeClosed
|
||||
}
|
||||
}
|
||||
|
||||
return stats
|
||||
}
|
||||
|
||||
// getBunAdapter returns or creates the Bun adapter
|
||||
func (c *sqlConnection) getBunAdapter() (common.Database, error) {
|
||||
c.mu.RLock()
|
||||
if c.bunAdapter != nil {
|
||||
defer c.mu.RUnlock()
|
||||
return c.bunAdapter, nil
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.bunAdapter != nil {
|
||||
return c.bunAdapter, nil
|
||||
}
|
||||
|
||||
bunDB, err := c.Bun()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.bunAdapter = database.NewBunAdapter(bunDB)
|
||||
return c.bunAdapter, nil
|
||||
}
|
||||
|
||||
// getGORMAdapter returns or creates the GORM adapter
|
||||
func (c *sqlConnection) getGORMAdapter() (common.Database, error) {
|
||||
c.mu.RLock()
|
||||
if c.gormAdapter != nil {
|
||||
defer c.mu.RUnlock()
|
||||
return c.gormAdapter, nil
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.gormAdapter != nil {
|
||||
return c.gormAdapter, nil
|
||||
}
|
||||
|
||||
gormDB, err := c.GORM()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.gormAdapter = database.NewGormAdapter(gormDB)
|
||||
return c.gormAdapter, nil
|
||||
}
|
||||
|
||||
// getNativeAdapter returns or creates the native adapter
|
||||
func (c *sqlConnection) getNativeAdapter() (common.Database, error) {
|
||||
c.mu.RLock()
|
||||
if c.nativeAdapter != nil {
|
||||
defer c.mu.RUnlock()
|
||||
return c.nativeAdapter, nil
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.nativeAdapter != nil {
|
||||
return c.nativeAdapter, nil
|
||||
}
|
||||
|
||||
native, err := c.Native()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create a native adapter based on database type
|
||||
switch c.dbType {
|
||||
case DatabaseTypePostgreSQL:
|
||||
c.nativeAdapter = database.NewPgSQLAdapter(native)
|
||||
case DatabaseTypeSQLite:
|
||||
// For SQLite, we'll use the PgSQL adapter as it works with standard sql.DB
|
||||
c.nativeAdapter = database.NewPgSQLAdapter(native)
|
||||
case DatabaseTypeMSSQL:
|
||||
// For MSSQL, we'll use the PgSQL adapter as it works with standard sql.DB
|
||||
c.nativeAdapter = database.NewPgSQLAdapter(native)
|
||||
default:
|
||||
return nil, ErrUnsupportedDatabase
|
||||
}
|
||||
|
||||
return c.nativeAdapter, nil
|
||||
}
|
||||
|
||||
// getBunDialect returns the appropriate Bun dialect for the database type
|
||||
func (c *sqlConnection) getBunDialect() schema.Dialect {
|
||||
switch c.dbType {
|
||||
case DatabaseTypePostgreSQL:
|
||||
return database.GetPostgresDialect()
|
||||
case DatabaseTypeSQLite:
|
||||
return database.GetSQLiteDialect()
|
||||
case DatabaseTypeMSSQL:
|
||||
return database.GetMSSQLDialect()
|
||||
default:
|
||||
// Default to PostgreSQL
|
||||
return database.GetPostgresDialect()
|
||||
}
|
||||
}
|
||||
|
||||
// getGORMDialector returns the appropriate GORM dialector for the database type
|
||||
func (c *sqlConnection) getGORMDialector(db *sql.DB) gorm.Dialector {
|
||||
switch c.dbType {
|
||||
case DatabaseTypePostgreSQL:
|
||||
return database.GetPostgresDialector(db)
|
||||
case DatabaseTypeSQLite:
|
||||
return database.GetSQLiteDialector(db)
|
||||
case DatabaseTypeMSSQL:
|
||||
return database.GetMSSQLDialector(db)
|
||||
default:
|
||||
// Default to PostgreSQL
|
||||
return database.GetPostgresDialector(db)
|
||||
}
|
||||
}
|
||||
|
||||
// mongoConnection implements Connection for MongoDB.
//
// SQL/ORM accessors return ErrNotSQLDatabase; use MongoDB() to obtain
// the client. mu guards client, connected, and the health-check fields.
type mongoConnection struct {
	name     string
	config   ConnectionConfig
	provider Provider

	// MongoDB client, populated by Connect.
	client *mongo.Client

	// State, guarded by mu.
	connected bool
	mu        sync.RWMutex

	// Health check bookkeeping, guarded by mu.
	lastHealthCheck   time.Time
	healthCheckStatus string
}
|
||||
|
||||
// newMongoConnection creates a new MongoDB connection
|
||||
func newMongoConnection(name string, config ConnectionConfig, provider Provider) *mongoConnection {
|
||||
return &mongoConnection{
|
||||
name: name,
|
||||
config: config,
|
||||
provider: provider,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the connection name
|
||||
func (c *mongoConnection) Name() string {
|
||||
return c.name
|
||||
}
|
||||
|
||||
// Type returns the database type (MongoDB)
|
||||
func (c *mongoConnection) Type() DatabaseType {
|
||||
return DatabaseTypeMongoDB
|
||||
}
|
||||
|
||||
// Connect establishes the MongoDB connection
|
||||
func (c *mongoConnection) Connect(ctx context.Context) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.connected {
|
||||
return ErrAlreadyConnected
|
||||
}
|
||||
|
||||
if err := c.provider.Connect(ctx, &c.config); err != nil {
|
||||
return NewConnectionError(c.name, "connect", err)
|
||||
}
|
||||
|
||||
// Get the mongo client
|
||||
client, err := c.provider.GetMongo()
|
||||
if err != nil {
|
||||
return NewConnectionError(c.name, "get mongo client", err)
|
||||
}
|
||||
|
||||
c.client = client
|
||||
c.connected = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the MongoDB connection
|
||||
func (c *mongoConnection) Close() error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if !c.connected {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := c.provider.Close(); err != nil {
|
||||
return NewConnectionError(c.name, "close", err)
|
||||
}
|
||||
|
||||
c.connected = false
|
||||
c.client = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// HealthCheck verifies the MongoDB connection is alive
|
||||
func (c *mongoConnection) HealthCheck(ctx context.Context) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
c.lastHealthCheck = time.Now()
|
||||
|
||||
if !c.connected {
|
||||
c.healthCheckStatus = "disconnected"
|
||||
return ErrConnectionClosed
|
||||
}
|
||||
|
||||
if err := c.provider.HealthCheck(ctx); err != nil {
|
||||
c.healthCheckStatus = "unhealthy: " + err.Error()
|
||||
return NewConnectionError(c.name, "health check", err)
|
||||
}
|
||||
|
||||
c.healthCheckStatus = "healthy"
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reconnect closes and re-establishes the MongoDB connection
|
||||
func (c *mongoConnection) Reconnect(ctx context.Context) error {
|
||||
if err := c.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
return c.Connect(ctx)
|
||||
}
|
||||
|
||||
// MongoDB returns the MongoDB client
|
||||
func (c *mongoConnection) MongoDB() (*mongo.Client, error) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
if !c.connected || c.client == nil {
|
||||
return nil, ErrConnectionClosed
|
||||
}
|
||||
|
||||
return c.client, nil
|
||||
}
|
||||
|
||||
// Bun returns an error for MongoDB connections
|
||||
func (c *mongoConnection) Bun() (*bun.DB, error) {
|
||||
return nil, ErrNotSQLDatabase
|
||||
}
|
||||
|
||||
// GORM returns an error for MongoDB connections
|
||||
func (c *mongoConnection) GORM() (*gorm.DB, error) {
|
||||
return nil, ErrNotSQLDatabase
|
||||
}
|
||||
|
||||
// Native returns an error for MongoDB connections
|
||||
func (c *mongoConnection) Native() (*sql.DB, error) {
|
||||
return nil, ErrNotSQLDatabase
|
||||
}
|
||||
|
||||
// Database returns an error for MongoDB connections
|
||||
func (c *mongoConnection) Database() (common.Database, error) {
|
||||
return nil, ErrNotSQLDatabase
|
||||
}
|
||||
|
||||
// Stats returns connection statistics for MongoDB
|
||||
func (c *mongoConnection) Stats() *ConnectionStats {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
return &ConnectionStats{
|
||||
Name: c.name,
|
||||
Type: DatabaseTypeMongoDB,
|
||||
Connected: c.connected,
|
||||
LastHealthCheck: c.lastHealthCheck,
|
||||
HealthCheckStatus: c.healthCheckStatus,
|
||||
}
|
||||
}
|
||||
82
pkg/dbmanager/errors.go
Normal file
82
pkg/dbmanager/errors.go
Normal file
@@ -0,0 +1,82 @@
|
||||
package dbmanager
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Sentinel errors returned by dbmanager operations. Callers should test
// for them with errors.Is.
var (
	ErrConnectionNotFound   = errors.New("connection not found")                // no connection registered under the given name
	ErrInvalidConfiguration = errors.New("invalid configuration")               // configuration failed validation
	ErrConnectionClosed     = errors.New("connection is closed")                // operation attempted on a closed connection
	ErrNotSQLDatabase       = errors.New("not a SQL database")                  // SQL operation on a non-SQL connection
	ErrNotMongoDB           = errors.New("not a MongoDB connection")            // MongoDB operation on a non-MongoDB connection
	ErrUnsupportedDatabase  = errors.New("unsupported database type")           // database type is not supported
	ErrNoDefaultConnection  = errors.New("no default connection configured")    // Database()/default lookup without a default
	ErrAlreadyConnected     = errors.New("already connected")                   // Connect called on a live connection
)
|
||||
|
||||
// ConnectionError wraps errors that occur during connection operations
|
||||
type ConnectionError struct {
|
||||
Name string
|
||||
Operation string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *ConnectionError) Error() string {
|
||||
return fmt.Sprintf("connection '%s' %s: %v", e.Name, e.Operation, e.Err)
|
||||
}
|
||||
|
||||
func (e *ConnectionError) Unwrap() error {
|
||||
return e.Err
|
||||
}
|
||||
|
||||
// NewConnectionError creates a new ConnectionError
|
||||
func NewConnectionError(name, operation string, err error) *ConnectionError {
|
||||
return &ConnectionError{
|
||||
Name: name,
|
||||
Operation: operation,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
// ConfigurationError wraps configuration-related errors
|
||||
type ConfigurationError struct {
|
||||
Field string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *ConfigurationError) Error() string {
|
||||
if e.Field != "" {
|
||||
return fmt.Sprintf("configuration error in field '%s': %v", e.Field, e.Err)
|
||||
}
|
||||
return fmt.Sprintf("configuration error: %v", e.Err)
|
||||
}
|
||||
|
||||
func (e *ConfigurationError) Unwrap() error {
|
||||
return e.Err
|
||||
}
|
||||
|
||||
// NewConfigurationError creates a new ConfigurationError
|
||||
func NewConfigurationError(field string, err error) *ConfigurationError {
|
||||
return &ConfigurationError{
|
||||
Field: field,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
51
pkg/dbmanager/factory.go
Normal file
51
pkg/dbmanager/factory.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package dbmanager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/dbmanager/providers"
|
||||
)
|
||||
|
||||
// createConnection creates a database connection based on the configuration
|
||||
func createConnection(cfg ConnectionConfig) (Connection, error) {
|
||||
// Validate configuration
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("invalid connection configuration: %w", err)
|
||||
}
|
||||
|
||||
// Create provider based on database type
|
||||
provider, err := createProvider(cfg.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create connection wrapper based on database type
|
||||
switch cfg.Type {
|
||||
case DatabaseTypePostgreSQL, DatabaseTypeSQLite, DatabaseTypeMSSQL:
|
||||
return newSQLConnection(cfg.Name, cfg.Type, cfg, provider), nil
|
||||
case DatabaseTypeMongoDB:
|
||||
return newMongoConnection(cfg.Name, cfg, provider), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: %s", ErrUnsupportedDatabase, cfg.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// createProvider creates a database provider based on the database type
|
||||
func createProvider(dbType DatabaseType) (Provider, error) {
|
||||
switch dbType {
|
||||
case DatabaseTypePostgreSQL:
|
||||
return providers.NewPostgresProvider(), nil
|
||||
case DatabaseTypeSQLite:
|
||||
return providers.NewSQLiteProvider(), nil
|
||||
case DatabaseTypeMSSQL:
|
||||
return providers.NewMSSQLProvider(), nil
|
||||
case DatabaseTypeMongoDB:
|
||||
return providers.NewMongoProvider(), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: %s", ErrUnsupportedDatabase, dbType)
|
||||
}
|
||||
}
|
||||
|
||||
// Provider is an alias to the providers.Provider interface
|
||||
// This allows dbmanager package consumers to use Provider without importing providers
|
||||
type Provider = providers.Provider
|
||||
379
pkg/dbmanager/manager.go
Normal file
379
pkg/dbmanager/manager.go
Normal file
@@ -0,0 +1,379 @@
|
||||
package dbmanager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
)
|
||||
|
||||
// Manager manages multiple named database connections
|
||||
type Manager interface {
|
||||
// Connection retrieval
|
||||
Get(name string) (Connection, error)
|
||||
GetDefault() (Connection, error)
|
||||
GetAll() map[string]Connection
|
||||
|
||||
// Default database management
|
||||
GetDefaultDatabase() (common.Database, error)
|
||||
SetDefaultDatabase(name string) error
|
||||
|
||||
// Lifecycle
|
||||
Connect(ctx context.Context) error
|
||||
Close() error
|
||||
HealthCheck(ctx context.Context) error
|
||||
|
||||
// Stats
|
||||
Stats() *ManagerStats
|
||||
}
|
||||
|
||||
// ManagerStats contains statistics about the connection manager
|
||||
type ManagerStats struct {
|
||||
TotalConnections int
|
||||
HealthyCount int
|
||||
UnhealthyCount int
|
||||
ConnectionStats map[string]*ConnectionStats
|
||||
}
|
||||
|
||||
// connectionManager implements Manager
|
||||
type connectionManager struct {
|
||||
connections map[string]Connection
|
||||
config ManagerConfig
|
||||
mu sync.RWMutex
|
||||
|
||||
// Background health check
|
||||
healthTicker *time.Ticker
|
||||
stopChan chan struct{}
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
var (
|
||||
// singleton instance of the manager
|
||||
instance Manager
|
||||
// instanceMu protects the singleton instance
|
||||
instanceMu sync.RWMutex
|
||||
)
|
||||
|
||||
// SetupManager initializes the singleton database manager with the provided configuration.
|
||||
// This function must be called before GetInstance().
|
||||
// Returns an error if the manager is already initialized or if configuration is invalid.
|
||||
func SetupManager(cfg ManagerConfig) error {
|
||||
instanceMu.Lock()
|
||||
defer instanceMu.Unlock()
|
||||
|
||||
if instance != nil {
|
||||
return fmt.Errorf("manager already initialized")
|
||||
}
|
||||
|
||||
mgr, err := NewManager(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create manager: %w", err)
|
||||
}
|
||||
|
||||
instance = mgr
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetInstance returns the singleton instance of the database manager.
|
||||
// Returns an error if SetupManager has not been called yet.
|
||||
func GetInstance() (Manager, error) {
|
||||
instanceMu.RLock()
|
||||
defer instanceMu.RUnlock()
|
||||
|
||||
if instance == nil {
|
||||
return nil, fmt.Errorf("manager not initialized: call SetupManager first")
|
||||
}
|
||||
|
||||
return instance, nil
|
||||
}
|
||||
|
||||
// ResetInstance resets the singleton instance (primarily for testing purposes).
|
||||
// WARNING: This should only be used in tests. Calling this in production code
|
||||
// while the manager is in use can lead to undefined behavior.
|
||||
func ResetInstance() {
|
||||
instanceMu.Lock()
|
||||
defer instanceMu.Unlock()
|
||||
|
||||
if instance != nil {
|
||||
_ = instance.Close()
|
||||
}
|
||||
instance = nil
|
||||
}
|
||||
|
||||
// NewManager creates a new database connection manager
|
||||
func NewManager(cfg ManagerConfig) (Manager, error) {
|
||||
// Apply defaults and validate configuration
|
||||
cfg.ApplyDefaults()
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("invalid configuration: %w", err)
|
||||
}
|
||||
|
||||
mgr := &connectionManager{
|
||||
connections: make(map[string]Connection),
|
||||
config: cfg,
|
||||
stopChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
return mgr, nil
|
||||
}
|
||||
|
||||
// Get retrieves a named connection
|
||||
func (m *connectionManager) Get(name string) (Connection, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
conn, ok := m.connections[name]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%w: %s", ErrConnectionNotFound, name)
|
||||
}
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// GetDefault retrieves the default connection
|
||||
func (m *connectionManager) GetDefault() (Connection, error) {
|
||||
m.mu.RLock()
|
||||
defaultName := m.config.DefaultConnection
|
||||
m.mu.RUnlock()
|
||||
|
||||
if defaultName == "" {
|
||||
return nil, ErrNoDefaultConnection
|
||||
}
|
||||
|
||||
return m.Get(defaultName)
|
||||
}
|
||||
|
||||
// GetAll returns all connections
|
||||
func (m *connectionManager) GetAll() map[string]Connection {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
// Create a copy to avoid concurrent access issues
|
||||
result := make(map[string]Connection, len(m.connections))
|
||||
for name, conn := range m.connections {
|
||||
result[name] = conn
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// GetDefaultDatabase returns the common.Database interface from the default connection
|
||||
func (m *connectionManager) GetDefaultDatabase() (common.Database, error) {
|
||||
conn, err := m.GetDefault()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
db, err := conn.Database()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get database from default connection: %w", err)
|
||||
}
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// SetDefaultDatabase sets the default database connection by name
|
||||
func (m *connectionManager) SetDefaultDatabase(name string) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
// Verify the connection exists
|
||||
if _, ok := m.connections[name]; !ok {
|
||||
return fmt.Errorf("%w: %s", ErrConnectionNotFound, name)
|
||||
}
|
||||
|
||||
m.config.DefaultConnection = name
|
||||
logger.Info("Default database connection changed: name=%s", name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Connect establishes all configured database connections
|
||||
func (m *connectionManager) Connect(ctx context.Context) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
// Create connections from configuration
|
||||
for name := range m.config.Connections {
|
||||
// Get a copy of the connection config
|
||||
connCfg := m.config.Connections[name]
|
||||
// Apply global defaults to connection config
|
||||
connCfg.ApplyDefaults(&m.config)
|
||||
connCfg.Name = name
|
||||
|
||||
// Create connection using factory
|
||||
conn, err := createConnection(connCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create connection '%s': %w", name, err)
|
||||
}
|
||||
|
||||
// Connect
|
||||
if err := conn.Connect(ctx); err != nil {
|
||||
return fmt.Errorf("failed to connect '%s': %w", name, err)
|
||||
}
|
||||
|
||||
m.connections[name] = conn
|
||||
logger.Info("Database connection established: name=%s, type=%s", name, connCfg.Type)
|
||||
}
|
||||
|
||||
// Start background health checks if enabled
|
||||
if m.config.EnableAutoReconnect && m.config.HealthCheckInterval > 0 {
|
||||
m.startHealthChecker()
|
||||
}
|
||||
|
||||
logger.Info("Database manager initialized: connections=%d", len(m.connections))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes all database connections
|
||||
func (m *connectionManager) Close() error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
// Stop health checker
|
||||
m.stopHealthChecker()
|
||||
|
||||
// Close all connections
|
||||
var errors []error
|
||||
for name, conn := range m.connections {
|
||||
if err := conn.Close(); err != nil {
|
||||
errors = append(errors, fmt.Errorf("failed to close connection '%s': %w", name, err))
|
||||
logger.Error("Failed to close connection", "name", name, "error", err)
|
||||
} else {
|
||||
logger.Info("Connection closed: name=%s", name)
|
||||
}
|
||||
}
|
||||
|
||||
m.connections = make(map[string]Connection)
|
||||
|
||||
if len(errors) > 0 {
|
||||
return fmt.Errorf("errors closing connections: %v", errors)
|
||||
}
|
||||
|
||||
logger.Info("Database manager closed")
|
||||
return nil
|
||||
}
|
||||
|
||||
// HealthCheck performs health checks on all connections
|
||||
func (m *connectionManager) HealthCheck(ctx context.Context) error {
|
||||
m.mu.RLock()
|
||||
connections := make(map[string]Connection, len(m.connections))
|
||||
for name, conn := range m.connections {
|
||||
connections[name] = conn
|
||||
}
|
||||
m.mu.RUnlock()
|
||||
|
||||
var errors []error
|
||||
for name, conn := range connections {
|
||||
if err := conn.HealthCheck(ctx); err != nil {
|
||||
errors = append(errors, fmt.Errorf("connection '%s': %w", name, err))
|
||||
}
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return fmt.Errorf("health check failed for %d connections: %v", len(errors), errors)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stats returns statistics for all connections
|
||||
func (m *connectionManager) Stats() *ManagerStats {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
stats := &ManagerStats{
|
||||
TotalConnections: len(m.connections),
|
||||
ConnectionStats: make(map[string]*ConnectionStats),
|
||||
}
|
||||
|
||||
for name, conn := range m.connections {
|
||||
connStats := conn.Stats()
|
||||
stats.ConnectionStats[name] = connStats
|
||||
|
||||
if connStats.Connected && connStats.HealthCheckStatus == "healthy" {
|
||||
stats.HealthyCount++
|
||||
} else {
|
||||
stats.UnhealthyCount++
|
||||
}
|
||||
}
|
||||
|
||||
return stats
|
||||
}
|
||||
|
||||
// startHealthChecker starts background health checking
|
||||
func (m *connectionManager) startHealthChecker() {
|
||||
if m.healthTicker != nil {
|
||||
return // Already running
|
||||
}
|
||||
|
||||
m.healthTicker = time.NewTicker(m.config.HealthCheckInterval)
|
||||
|
||||
m.wg.Add(1)
|
||||
go func() {
|
||||
defer m.wg.Done()
|
||||
logger.Info("Health checker started: interval=%v", m.config.HealthCheckInterval)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-m.healthTicker.C:
|
||||
m.performHealthCheck()
|
||||
case <-m.stopChan:
|
||||
logger.Info("Health checker stopped")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// stopHealthChecker stops background health checking
|
||||
func (m *connectionManager) stopHealthChecker() {
|
||||
if m.healthTicker != nil {
|
||||
m.healthTicker.Stop()
|
||||
close(m.stopChan)
|
||||
m.wg.Wait()
|
||||
m.healthTicker = nil
|
||||
}
|
||||
}
|
||||
|
||||
// performHealthCheck performs a health check on all connections
|
||||
func (m *connectionManager) performHealthCheck() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
m.mu.RLock()
|
||||
connections := make([]struct {
|
||||
name string
|
||||
conn Connection
|
||||
}, 0, len(m.connections))
|
||||
for name, conn := range m.connections {
|
||||
connections = append(connections, struct {
|
||||
name string
|
||||
conn Connection
|
||||
}{name, conn})
|
||||
}
|
||||
m.mu.RUnlock()
|
||||
|
||||
for _, item := range connections {
|
||||
if err := item.conn.HealthCheck(ctx); err != nil {
|
||||
logger.Warn("Health check failed",
|
||||
"connection", item.name,
|
||||
"error", err)
|
||||
|
||||
// Attempt reconnection if enabled
|
||||
if m.config.EnableAutoReconnect {
|
||||
logger.Info("Attempting reconnection: connection=%s", item.name)
|
||||
if err := item.conn.Reconnect(ctx); err != nil {
|
||||
logger.Error("Reconnection failed",
|
||||
"connection", item.name,
|
||||
"error", err)
|
||||
} else {
|
||||
logger.Info("Reconnection successful: connection=%s", item.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
136
pkg/dbmanager/metrics.go
Normal file
136
pkg/dbmanager/metrics.go
Normal file
@@ -0,0 +1,136 @@
|
||||
package dbmanager
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
// connectionsTotal tracks the total number of configured database connections
|
||||
connectionsTotal = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "dbmanager_connections_total",
|
||||
Help: "Total number of configured database connections",
|
||||
},
|
||||
[]string{"type"},
|
||||
)
|
||||
|
||||
// connectionStatus tracks connection health status (1=healthy, 0=unhealthy)
|
||||
connectionStatus = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "dbmanager_connection_status",
|
||||
Help: "Connection status (1=healthy, 0=unhealthy)",
|
||||
},
|
||||
[]string{"name", "type"},
|
||||
)
|
||||
|
||||
// connectionPoolSize tracks connection pool sizes
|
||||
connectionPoolSize = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "dbmanager_connection_pool_size",
|
||||
Help: "Current connection pool size",
|
||||
},
|
||||
[]string{"name", "type", "state"}, // state: open, idle, in_use
|
||||
)
|
||||
|
||||
// connectionWaitCount tracks how many times connections had to wait for availability
|
||||
connectionWaitCount = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "dbmanager_connection_wait_count",
|
||||
Help: "Number of times connections had to wait for availability",
|
||||
},
|
||||
[]string{"name", "type"},
|
||||
)
|
||||
|
||||
// connectionWaitDuration tracks total time connections spent waiting
|
||||
connectionWaitDuration = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "dbmanager_connection_wait_duration_seconds",
|
||||
Help: "Total time connections spent waiting for availability",
|
||||
},
|
||||
[]string{"name", "type"},
|
||||
)
|
||||
|
||||
// reconnectAttempts tracks reconnection attempts and their outcomes
|
||||
reconnectAttempts = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "dbmanager_reconnect_attempts_total",
|
||||
Help: "Total number of reconnection attempts",
|
||||
},
|
||||
[]string{"name", "type", "result"}, // result: success, failure
|
||||
)
|
||||
|
||||
// connectionLifetimeClosed tracks connections closed due to max lifetime
|
||||
connectionLifetimeClosed = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "dbmanager_connection_lifetime_closed_total",
|
||||
Help: "Total connections closed due to exceeding max lifetime",
|
||||
},
|
||||
[]string{"name", "type"},
|
||||
)
|
||||
|
||||
// connectionIdleClosed tracks connections closed due to max idle time
|
||||
connectionIdleClosed = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "dbmanager_connection_idle_closed_total",
|
||||
Help: "Total connections closed due to exceeding max idle time",
|
||||
},
|
||||
[]string{"name", "type"},
|
||||
)
|
||||
)
|
||||
|
||||
// PublishMetrics publishes current metrics for all connections
|
||||
func (m *connectionManager) PublishMetrics() {
|
||||
stats := m.Stats()
|
||||
|
||||
// Count connections by type
|
||||
typeCount := make(map[DatabaseType]int)
|
||||
for _, connStats := range stats.ConnectionStats {
|
||||
typeCount[connStats.Type]++
|
||||
}
|
||||
|
||||
// Update total connections gauge
|
||||
for dbType, count := range typeCount {
|
||||
connectionsTotal.WithLabelValues(string(dbType)).Set(float64(count))
|
||||
}
|
||||
|
||||
// Update per-connection metrics
|
||||
for name, connStats := range stats.ConnectionStats {
|
||||
labels := prometheus.Labels{
|
||||
"name": name,
|
||||
"type": string(connStats.Type),
|
||||
}
|
||||
|
||||
// Connection status
|
||||
status := float64(0)
|
||||
if connStats.Connected && connStats.HealthCheckStatus == "healthy" {
|
||||
status = 1
|
||||
}
|
||||
connectionStatus.With(labels).Set(status)
|
||||
|
||||
// Pool size metrics (SQL databases only)
|
||||
if connStats.Type != DatabaseTypeMongoDB {
|
||||
connectionPoolSize.WithLabelValues(name, string(connStats.Type), "open").Set(float64(connStats.OpenConnections))
|
||||
connectionPoolSize.WithLabelValues(name, string(connStats.Type), "idle").Set(float64(connStats.Idle))
|
||||
connectionPoolSize.WithLabelValues(name, string(connStats.Type), "in_use").Set(float64(connStats.InUse))
|
||||
|
||||
// Wait stats
|
||||
connectionWaitCount.With(labels).Set(float64(connStats.WaitCount))
|
||||
connectionWaitDuration.With(labels).Set(connStats.WaitDuration.Seconds())
|
||||
|
||||
// Lifetime/idle closed
|
||||
connectionLifetimeClosed.With(labels).Set(float64(connStats.MaxLifetimeClosed))
|
||||
connectionIdleClosed.With(labels).Set(float64(connStats.MaxIdleClosed))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RecordReconnectAttempt records a reconnection attempt
|
||||
func RecordReconnectAttempt(name string, dbType DatabaseType, success bool) {
|
||||
result := "failure"
|
||||
if success {
|
||||
result = "success"
|
||||
}
|
||||
|
||||
reconnectAttempts.WithLabelValues(name, string(dbType), result).Inc()
|
||||
}
|
||||
319
pkg/dbmanager/providers/POSTGRES_NOTIFY_LISTEN.md
Normal file
319
pkg/dbmanager/providers/POSTGRES_NOTIFY_LISTEN.md
Normal file
@@ -0,0 +1,319 @@
|
||||
# PostgreSQL NOTIFY/LISTEN Support
|
||||
|
||||
The `dbmanager` package provides built-in support for PostgreSQL's NOTIFY/LISTEN functionality through the `PostgresListener` type.
|
||||
|
||||
## Overview
|
||||
|
||||
PostgreSQL NOTIFY/LISTEN is a simple pub/sub mechanism that allows database clients to:
|
||||
- **LISTEN** on named channels to receive notifications
|
||||
- **NOTIFY** channels to send messages to all listeners
|
||||
- Receive asynchronous notifications without polling
|
||||
|
||||
## Features
|
||||
|
||||
- ✅ Subscribe to multiple channels simultaneously
|
||||
- ✅ Callback-based notification handling
|
||||
- ✅ Automatic reconnection on connection loss
|
||||
- ✅ Automatic resubscription after reconnection
|
||||
- ✅ Thread-safe operations
|
||||
- ✅ Panic recovery in notification handlers
|
||||
- ✅ Dedicated connection for listening (doesn't interfere with queries)
|
||||
|
||||
## Usage
|
||||
|
||||
### Basic Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/dbmanager/providers"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Create PostgreSQL provider
|
||||
cfg := &providers.Config{
|
||||
Name: "primary",
|
||||
Type: "postgres",
|
||||
Host: "localhost",
|
||||
Port: 5432,
|
||||
User: "postgres",
|
||||
Password: "password",
|
||||
Database: "myapp",
|
||||
}
|
||||
|
||||
provider := providers.NewPostgresProvider()
|
||||
ctx := context.Background()
|
||||
|
||||
if err := provider.Connect(ctx, cfg); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer provider.Close()
|
||||
|
||||
// Get listener
|
||||
listener, err := provider.GetListener(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Subscribe to a channel
|
||||
err = listener.Listen("events", func(channel, payload string) {
|
||||
fmt.Printf("Received on %s: %s\n", channel, payload)
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Send a notification
|
||||
err = listener.Notify(ctx, "events", "Hello, World!")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Keep the program running
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
```
|
||||
|
||||
### Multiple Channels
|
||||
|
||||
```go
|
||||
listener, _ := provider.GetListener(ctx)
|
||||
|
||||
// Listen to different channels with different handlers
|
||||
listener.Listen("user_events", func(channel, payload string) {
|
||||
fmt.Printf("User event: %s\n", payload)
|
||||
})
|
||||
|
||||
listener.Listen("order_events", func(channel, payload string) {
|
||||
fmt.Printf("Order event: %s\n", payload)
|
||||
})
|
||||
|
||||
listener.Listen("payment_events", func(channel, payload string) {
|
||||
fmt.Printf("Payment event: %s\n", payload)
|
||||
})
|
||||
```
|
||||
|
||||
### Unsubscribing
|
||||
|
||||
```go
|
||||
// Stop listening to a specific channel
|
||||
err := listener.Unlisten("user_events")
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to unlisten: %v\n", err)
|
||||
}
|
||||
```
|
||||
|
||||
### Checking Active Channels
|
||||
|
||||
```go
|
||||
// Get list of channels currently being listened to
|
||||
channels := listener.Channels()
|
||||
fmt.Printf("Listening to: %v\n", channels)
|
||||
```
|
||||
|
||||
### Checking Connection Status
|
||||
|
||||
```go
|
||||
if listener.IsConnected() {
|
||||
fmt.Println("Listener is connected")
|
||||
} else {
|
||||
fmt.Println("Listener is disconnected")
|
||||
}
|
||||
```
|
||||
|
||||
## Integration with DBManager
|
||||
|
||||
When using the DBManager, you can access the listener through the PostgreSQL provider:
|
||||
|
||||
```go
|
||||
// Initialize DBManager
|
||||
mgr, err := dbmanager.NewManager(dbmanager.FromConfig(cfg.DBManager))
|
||||
mgr.Connect(ctx)
|
||||
defer mgr.Close()
|
||||
|
||||
// Get PostgreSQL connection
|
||||
conn, err := mgr.Get("primary")
|
||||
|
||||
// Note: You'll need to cast to the underlying provider type
|
||||
// This requires exposing the provider through the Connection interface
|
||||
// or providing a helper method
|
||||
```
|
||||
|
||||
## Use Cases
|
||||
|
||||
### Cache Invalidation
|
||||
|
||||
```go
|
||||
listener.Listen("cache_invalidation", func(channel, payload string) {
|
||||
// Parse the payload to determine what to invalidate
|
||||
cache.Invalidate(payload)
|
||||
})
|
||||
```
|
||||
|
||||
### Real-time Updates
|
||||
|
||||
```go
|
||||
listener.Listen("data_updates", func(channel, payload string) {
|
||||
// Broadcast update to WebSocket clients
|
||||
websocketBroadcast(payload)
|
||||
})
|
||||
```
|
||||
|
||||
### Configuration Reload
|
||||
|
||||
```go
|
||||
listener.Listen("config_reload", func(channel, payload string) {
|
||||
// Reload application configuration
|
||||
config.Reload()
|
||||
})
|
||||
```
|
||||
|
||||
### Distributed Locking
|
||||
|
||||
```go
|
||||
listener.Listen("lock_released", func(channel, payload string) {
|
||||
// Attempt to acquire the lock
|
||||
tryAcquireLock(payload)
|
||||
})
|
||||
```
|
||||
|
||||
## Automatic Reconnection
|
||||
|
||||
The listener automatically handles connection failures:
|
||||
|
||||
1. When a connection error is detected, the listener initiates reconnection
|
||||
2. Once reconnected, it automatically resubscribes to all previous channels
|
||||
3. Notification handlers remain active throughout the reconnection process
|
||||
|
||||
No manual intervention is required for reconnection.
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Handler Panics
|
||||
|
||||
If a notification handler panics, the panic is recovered and logged. The listener continues to function normally:
|
||||
|
||||
```go
|
||||
listener.Listen("events", func(channel, payload string) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
log.Printf("Handler panic: %v", r)
|
||||
}
|
||||
}()
|
||||
|
||||
// Your event processing logic
|
||||
processEvent(payload)
|
||||
})
|
||||
```
|
||||
|
||||
### Connection Errors
|
||||
|
||||
Connection errors trigger automatic reconnection. Check logs for reconnection events when `EnableLogging` is true.
|
||||
|
||||
## Thread Safety
|
||||
|
||||
All `PostgresListener` methods are thread-safe and can be called concurrently from multiple goroutines.
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
1. **Dedicated Connection**: The listener uses a dedicated PostgreSQL connection separate from the query connection pool
|
||||
2. **Asynchronous Handlers**: Notification handlers run in separate goroutines to avoid blocking
|
||||
3. **Lightweight**: NOTIFY/LISTEN has minimal overhead compared to polling
|
||||
|
||||
## Comparison with Polling
|
||||
|
||||
| Feature | NOTIFY/LISTEN | Polling |
|
||||
|---------|---------------|---------|
|
||||
| Latency | Low (near real-time) | High (depends on poll interval) |
|
||||
| Database Load | Minimal | High (constant queries) |
|
||||
| Scalability | Excellent | Poor |
|
||||
| Complexity | Simple | Moderate |
|
||||
|
||||
## Limitations
|
||||
|
||||
1. **PostgreSQL Only**: This feature is specific to PostgreSQL and not available for other databases
|
||||
2. **No Message Persistence**: Notifications are not stored; if no listener is connected, the message is lost
|
||||
3. **Payload Limit**: Notification payload is limited to 8000 bytes in PostgreSQL
|
||||
4. **No Guaranteed Delivery**: If a listener disconnects, in-flight notifications may be lost
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Keep Handlers Fast**: Notification handlers should be quick; for heavy processing, send work to a queue
|
||||
2. **Use JSON Payloads**: Encode structured data as JSON for easy parsing
|
||||
3. **Handle Errors Gracefully**: Always recover from panics in handlers
|
||||
4. **Close Properly**: Always close the provider to ensure the listener is properly shut down
|
||||
5. **Monitor Connection Status**: Use `IsConnected()` for health checks
|
||||
|
||||
## Example: Real-World Application
|
||||
|
||||
```go
|
||||
// Subscribe to various application events
|
||||
listener, _ := provider.GetListener(ctx)
|
||||
|
||||
// User registration events
|
||||
listener.Listen("user_registered", func(channel, payload string) {
|
||||
var event UserRegisteredEvent
|
||||
json.Unmarshal([]byte(payload), &event)
|
||||
|
||||
// Send welcome email
|
||||
sendWelcomeEmail(event.UserID)
|
||||
|
||||
// Invalidate user count cache
|
||||
cache.Delete("user_count")
|
||||
})
|
||||
|
||||
// Order placement events
|
||||
listener.Listen("order_placed", func(channel, payload string) {
|
||||
var event OrderPlacedEvent
|
||||
json.Unmarshal([]byte(payload), &event)
|
||||
|
||||
// Notify warehouse system
|
||||
warehouse.ProcessOrder(event.OrderID)
|
||||
|
||||
// Update inventory cache
|
||||
cache.Invalidate("inventory:" + event.ProductID)
|
||||
})
|
||||
|
||||
// Configuration changes
|
||||
listener.Listen("config_updated", func(channel, payload string) {
|
||||
// Reload configuration from database
|
||||
appConfig.Reload()
|
||||
})
|
||||
```
|
||||
|
||||
## Triggering Notifications from SQL
|
||||
|
||||
You can trigger notifications directly from PostgreSQL triggers or functions:
|
||||
|
||||
```sql
|
||||
-- Example trigger to notify on new user
|
||||
CREATE OR REPLACE FUNCTION notify_user_registered()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
PERFORM pg_notify('user_registered',
|
||||
json_build_object(
|
||||
'user_id', NEW.id,
|
||||
'email', NEW.email,
|
||||
'timestamp', NOW()
|
||||
)::text
|
||||
);
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
CREATE TRIGGER user_registered_trigger
|
||||
AFTER INSERT ON users
|
||||
FOR EACH ROW
|
||||
EXECUTE FUNCTION notify_user_registered();
|
||||
```
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- [PostgreSQL NOTIFY Documentation](https://www.postgresql.org/docs/current/sql-notify.html)
|
||||
- [PostgreSQL LISTEN Documentation](https://www.postgresql.org/docs/current/sql-listen.html)
|
||||
- [pgx Driver Documentation](https://github.com/jackc/pgx)
|
||||
214
pkg/dbmanager/providers/mongodb.go
Normal file
214
pkg/dbmanager/providers/mongodb.go
Normal file
@@ -0,0 +1,214 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
"go.mongodb.org/mongo-driver/mongo/readpref"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
)
|
||||
|
||||
// MongoProvider implements Provider for MongoDB databases
|
||||
type MongoProvider struct {
|
||||
client *mongo.Client
|
||||
config ConnectionConfig
|
||||
}
|
||||
|
||||
// NewMongoProvider creates a new MongoDB provider
|
||||
func NewMongoProvider() *MongoProvider {
|
||||
return &MongoProvider{}
|
||||
}
|
||||
|
||||
// Connect establishes a MongoDB connection.
//
// It builds the connection URI from cfg, applies pool-size, timeout and
// read-preference options, then dials with up to three attempts using
// exponential backoff. On success the client and config are stored on the
// provider. ctx bounds both the backoff waits and the dial itself.
func (p *MongoProvider) Connect(ctx context.Context, cfg ConnectionConfig) error {
	// Build DSN
	dsn, err := cfg.BuildDSN()
	if err != nil {
		return fmt.Errorf("failed to build DSN: %w", err)
	}

	// Create client options
	clientOpts := options.Client().ApplyURI(dsn)

	// Set connection pool size (only when the config supplies explicit values)
	if cfg.GetMaxOpenConns() != nil {
		maxPoolSize := uint64(*cfg.GetMaxOpenConns())
		clientOpts.SetMaxPoolSize(maxPoolSize)
	}

	// MongoDB has no "max idle" knob; min pool size is the closest analogue.
	if cfg.GetMaxIdleConns() != nil {
		minPoolSize := uint64(*cfg.GetMaxIdleConns())
		clientOpts.SetMinPoolSize(minPoolSize)
	}

	// Set timeouts
	clientOpts.SetConnectTimeout(cfg.GetConnectTimeout())
	if cfg.GetQueryTimeout() > 0 {
		clientOpts.SetTimeout(cfg.GetQueryTimeout())
	}

	// Set read preference if specified
	if cfg.GetReadPreference() != "" {
		rp, err := parseReadPreference(cfg.GetReadPreference())
		if err != nil {
			return fmt.Errorf("invalid read preference: %w", err)
		}
		clientOpts.SetReadPreference(rp)
	}

	// Connect with retry logic
	var client *mongo.Client
	var lastErr error

	retryAttempts := 3
	retryDelay := 1 * time.Second

	for attempt := 0; attempt < retryAttempts; attempt++ {
		if attempt > 0 {
			// Back off before every attempt after the first.
			delay := calculateBackoff(attempt, retryDelay, 10*time.Second)
			if cfg.GetEnableLogging() {
				logger.Info("Retrying MongoDB connection: attempt=%d/%d, delay=%v", attempt+1, retryAttempts, delay)
			}

			select {
			case <-time.After(delay):
			case <-ctx.Done():
				return ctx.Err()
			}
		}

		// Create MongoDB client
		client, err = mongo.Connect(ctx, clientOpts)
		if err != nil {
			lastErr = err
			if cfg.GetEnableLogging() {
				logger.Warn("Failed to connect to MongoDB", "error", err)
			}
			continue
		}

		// Ping the database to verify connection
		pingCtx, cancel := context.WithTimeout(ctx, cfg.GetConnectTimeout())
		err = client.Ping(pingCtx, readpref.Primary())
		cancel()

		if err != nil {
			lastErr = err
			// Discard the half-open client before retrying.
			_ = client.Disconnect(ctx)
			if cfg.GetEnableLogging() {
				logger.Warn("Failed to ping MongoDB", "error", err)
			}
			continue
		}

		// Connection successful
		break
	}

	// err reflects the final attempt; lastErr preserves the underlying cause.
	if err != nil {
		return fmt.Errorf("failed to connect after %d attempts: %w", retryAttempts, lastErr)
	}

	p.client = client
	p.config = cfg

	if cfg.GetEnableLogging() {
		logger.Info("MongoDB connection established: name=%s, host=%s, database=%s", cfg.GetName(), cfg.GetHost(), cfg.GetDatabase())
	}

	return nil
}
|
||||
|
||||
// Close closes the MongoDB connection
|
||||
func (p *MongoProvider) Close() error {
|
||||
if p.client == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
err := p.client.Disconnect(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to close MongoDB connection: %w", err)
|
||||
}
|
||||
|
||||
if p.config.GetEnableLogging() {
|
||||
logger.Info("MongoDB connection closed: name=%s", p.config.GetName())
|
||||
}
|
||||
|
||||
p.client = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// HealthCheck verifies the MongoDB connection is alive
|
||||
func (p *MongoProvider) HealthCheck(ctx context.Context) error {
|
||||
if p.client == nil {
|
||||
return fmt.Errorf("MongoDB client is nil")
|
||||
}
|
||||
|
||||
// Use a short timeout for health checks
|
||||
healthCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := p.client.Ping(healthCtx, readpref.Primary()); err != nil {
|
||||
return fmt.Errorf("health check failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetNative returns an error for MongoDB (not a SQL database)
func (p *MongoProvider) GetNative() (*sql.DB, error) {
	// MongoDB has no *sql.DB handle; callers should use GetMongo instead.
	return nil, ErrNotSQLDatabase
}
|
||||
|
||||
// GetMongo returns the MongoDB client
|
||||
func (p *MongoProvider) GetMongo() (*mongo.Client, error) {
|
||||
if p.client == nil {
|
||||
return nil, fmt.Errorf("MongoDB client is not initialized")
|
||||
}
|
||||
return p.client, nil
|
||||
}
|
||||
|
||||
// Stats returns connection statistics for MongoDB
|
||||
func (p *MongoProvider) Stats() *ConnectionStats {
|
||||
if p.client == nil {
|
||||
return &ConnectionStats{
|
||||
Name: p.config.GetName(),
|
||||
Type: "mongodb",
|
||||
Connected: false,
|
||||
}
|
||||
}
|
||||
|
||||
// MongoDB doesn't expose detailed connection pool stats like sql.DB
|
||||
// We return basic stats
|
||||
return &ConnectionStats{
|
||||
Name: p.config.GetName(),
|
||||
Type: "mongodb",
|
||||
Connected: true,
|
||||
}
|
||||
}
|
||||
|
||||
// parseReadPreference parses a read preference string into a readpref.ReadPref
|
||||
func parseReadPreference(rp string) (*readpref.ReadPref, error) {
|
||||
switch rp {
|
||||
case "primary":
|
||||
return readpref.Primary(), nil
|
||||
case "primaryPreferred":
|
||||
return readpref.PrimaryPreferred(), nil
|
||||
case "secondary":
|
||||
return readpref.Secondary(), nil
|
||||
case "secondaryPreferred":
|
||||
return readpref.SecondaryPreferred(), nil
|
||||
case "nearest":
|
||||
return readpref.Nearest(), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown read preference: %s", rp)
|
||||
}
|
||||
}
|
||||
184
pkg/dbmanager/providers/mssql.go
Normal file
184
pkg/dbmanager/providers/mssql.go
Normal file
@@ -0,0 +1,184 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
_ "github.com/microsoft/go-mssqldb" // MSSQL driver
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
)
|
||||
|
||||
// MSSQLProvider implements Provider for Microsoft SQL Server databases
type MSSQLProvider struct {
	// db is the pooled SQL handle; nil until Connect succeeds.
	db *sql.DB
	// config is the connection configuration captured by a successful Connect.
	config ConnectionConfig
}

// NewMSSQLProvider creates a new MSSQL provider.
// The returned provider is unusable until Connect is called.
func NewMSSQLProvider() *MSSQLProvider {
	return &MSSQLProvider{}
}
|
||||
|
||||
// Connect establishes a MSSQL connection.
//
// It dials with up to three attempts using exponential backoff, verifies
// the connection with a bounded ping, then applies the pool limits from
// cfg and stores the handle on the provider.
func (p *MSSQLProvider) Connect(ctx context.Context, cfg ConnectionConfig) error {
	// Build DSN
	dsn, err := cfg.BuildDSN()
	if err != nil {
		return fmt.Errorf("failed to build DSN: %w", err)
	}

	// Connect with retry logic
	var db *sql.DB
	var lastErr error

	retryAttempts := 3 // Default retry attempts
	retryDelay := 1 * time.Second

	for attempt := 0; attempt < retryAttempts; attempt++ {
		if attempt > 0 {
			// Back off before every attempt after the first.
			delay := calculateBackoff(attempt, retryDelay, 10*time.Second)
			if cfg.GetEnableLogging() {
				logger.Info("Retrying MSSQL connection: attempt=%d/%d, delay=%v", attempt+1, retryAttempts, delay)
			}

			select {
			case <-time.After(delay):
			case <-ctx.Done():
				return ctx.Err()
			}
		}

		// Open database connection (sql.Open is lazy; the ping below dials)
		db, err = sql.Open("sqlserver", dsn)
		if err != nil {
			lastErr = err
			if cfg.GetEnableLogging() {
				logger.Warn("Failed to open MSSQL connection", "error", err)
			}
			continue
		}

		// Test the connection with context timeout
		connectCtx, cancel := context.WithTimeout(ctx, cfg.GetConnectTimeout())
		err = db.PingContext(connectCtx)
		cancel()

		if err != nil {
			lastErr = err
			// Release the failed handle before retrying so pools don't leak.
			db.Close()
			if cfg.GetEnableLogging() {
				logger.Warn("Failed to ping MSSQL database", "error", err)
			}
			continue
		}

		// Connection successful
		break
	}

	// err reflects the final attempt; lastErr preserves the underlying cause.
	if err != nil {
		return fmt.Errorf("failed to connect after %d attempts: %w", retryAttempts, lastErr)
	}

	// Configure connection pool
	if cfg.GetMaxOpenConns() != nil {
		db.SetMaxOpenConns(*cfg.GetMaxOpenConns())
	}
	if cfg.GetMaxIdleConns() != nil {
		db.SetMaxIdleConns(*cfg.GetMaxIdleConns())
	}
	if cfg.GetConnMaxLifetime() != nil {
		db.SetConnMaxLifetime(*cfg.GetConnMaxLifetime())
	}
	if cfg.GetConnMaxIdleTime() != nil {
		db.SetConnMaxIdleTime(*cfg.GetConnMaxIdleTime())
	}

	p.db = db
	p.config = cfg

	if cfg.GetEnableLogging() {
		logger.Info("MSSQL connection established: name=%s, host=%s, database=%s", cfg.GetName(), cfg.GetHost(), cfg.GetDatabase())
	}

	return nil
}
|
||||
|
||||
// Close closes the MSSQL connection
|
||||
func (p *MSSQLProvider) Close() error {
|
||||
if p.db == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := p.db.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to close MSSQL connection: %w", err)
|
||||
}
|
||||
|
||||
if p.config.GetEnableLogging() {
|
||||
logger.Info("MSSQL connection closed: name=%s", p.config.GetName())
|
||||
}
|
||||
|
||||
p.db = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// HealthCheck verifies the MSSQL connection is alive
|
||||
func (p *MSSQLProvider) HealthCheck(ctx context.Context) error {
|
||||
if p.db == nil {
|
||||
return fmt.Errorf("database connection is nil")
|
||||
}
|
||||
|
||||
// Use a short timeout for health checks
|
||||
healthCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := p.db.PingContext(healthCtx); err != nil {
|
||||
return fmt.Errorf("health check failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetNative returns the native *sql.DB connection
|
||||
func (p *MSSQLProvider) GetNative() (*sql.DB, error) {
|
||||
if p.db == nil {
|
||||
return nil, fmt.Errorf("database connection is not initialized")
|
||||
}
|
||||
return p.db, nil
|
||||
}
|
||||
|
||||
// GetMongo returns an error for MSSQL (not a MongoDB connection)
func (p *MSSQLProvider) GetMongo() (*mongo.Client, error) {
	// Sentinel error lets callers detect the mismatch with errors.Is.
	return nil, ErrNotMongoDB
}
|
||||
|
||||
// Stats returns connection pool statistics
|
||||
func (p *MSSQLProvider) Stats() *ConnectionStats {
|
||||
if p.db == nil {
|
||||
return &ConnectionStats{
|
||||
Name: p.config.GetName(),
|
||||
Type: "mssql",
|
||||
Connected: false,
|
||||
}
|
||||
}
|
||||
|
||||
stats := p.db.Stats()
|
||||
|
||||
return &ConnectionStats{
|
||||
Name: p.config.GetName(),
|
||||
Type: "mssql",
|
||||
Connected: true,
|
||||
OpenConnections: stats.OpenConnections,
|
||||
InUse: stats.InUse,
|
||||
Idle: stats.Idle,
|
||||
WaitCount: stats.WaitCount,
|
||||
WaitDuration: stats.WaitDuration,
|
||||
MaxIdleClosed: stats.MaxIdleClosed,
|
||||
MaxLifetimeClosed: stats.MaxLifetimeClosed,
|
||||
}
|
||||
}
|
||||
231
pkg/dbmanager/providers/postgres.go
Normal file
231
pkg/dbmanager/providers/postgres.go
Normal file
@@ -0,0 +1,231 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
_ "github.com/jackc/pgx/v5/stdlib" // PostgreSQL driver
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
)
|
||||
|
||||
// PostgresProvider implements Provider for PostgreSQL databases
type PostgresProvider struct {
	// db is the pooled SQL handle; nil until Connect succeeds.
	db *sql.DB
	// config is the connection configuration captured by a successful Connect.
	config ConnectionConfig
	// listener is lazily created by GetListener and reused afterwards.
	listener *PostgresListener
	// mu guards listener creation (GetListener) and teardown (Close).
	mu sync.Mutex
}

// NewPostgresProvider creates a new PostgreSQL provider.
// The returned provider is unusable until Connect is called.
func NewPostgresProvider() *PostgresProvider {
	return &PostgresProvider{}
}
|
||||
|
||||
// Connect establishes a PostgreSQL connection.
//
// It dials through the pgx stdlib driver with up to three attempts using
// exponential backoff, verifies the connection with a bounded ping, then
// applies the pool limits from cfg and stores the handle on the provider.
func (p *PostgresProvider) Connect(ctx context.Context, cfg ConnectionConfig) error {
	// Build DSN
	dsn, err := cfg.BuildDSN()
	if err != nil {
		return fmt.Errorf("failed to build DSN: %w", err)
	}

	// Connect with retry logic
	var db *sql.DB
	var lastErr error

	retryAttempts := 3 // Default retry attempts
	retryDelay := 1 * time.Second

	for attempt := 0; attempt < retryAttempts; attempt++ {
		if attempt > 0 {
			// Back off before every attempt after the first.
			delay := calculateBackoff(attempt, retryDelay, 10*time.Second)
			if cfg.GetEnableLogging() {
				logger.Info("Retrying PostgreSQL connection: attempt=%d/%d, delay=%v", attempt+1, retryAttempts, delay)
			}

			select {
			case <-time.After(delay):
			case <-ctx.Done():
				return ctx.Err()
			}
		}

		// Open database connection (sql.Open is lazy; the ping below dials)
		db, err = sql.Open("pgx", dsn)
		if err != nil {
			lastErr = err
			if cfg.GetEnableLogging() {
				logger.Warn("Failed to open PostgreSQL connection", "error", err)
			}
			continue
		}

		// Test the connection with context timeout
		connectCtx, cancel := context.WithTimeout(ctx, cfg.GetConnectTimeout())
		err = db.PingContext(connectCtx)
		cancel()

		if err != nil {
			lastErr = err
			// Release the failed handle before retrying so pools don't leak.
			db.Close()
			if cfg.GetEnableLogging() {
				logger.Warn("Failed to ping PostgreSQL database", "error", err)
			}
			continue
		}

		// Connection successful
		break
	}

	// err reflects the final attempt; lastErr preserves the underlying cause.
	if err != nil {
		return fmt.Errorf("failed to connect after %d attempts: %w", retryAttempts, lastErr)
	}

	// Configure connection pool
	if cfg.GetMaxOpenConns() != nil {
		db.SetMaxOpenConns(*cfg.GetMaxOpenConns())
	}
	if cfg.GetMaxIdleConns() != nil {
		db.SetMaxIdleConns(*cfg.GetMaxIdleConns())
	}
	if cfg.GetConnMaxLifetime() != nil {
		db.SetConnMaxLifetime(*cfg.GetConnMaxLifetime())
	}
	if cfg.GetConnMaxIdleTime() != nil {
		db.SetConnMaxIdleTime(*cfg.GetConnMaxIdleTime())
	}

	p.db = db
	p.config = cfg

	if cfg.GetEnableLogging() {
		logger.Info("PostgreSQL connection established: name=%s, host=%s, database=%s", cfg.GetName(), cfg.GetHost(), cfg.GetDatabase())
	}

	return nil
}
|
||||
|
||||
// Close closes the PostgreSQL connection.
//
// The lazily-created listener (if any) is shut down first; only then is
// the pooled *sql.DB closed. Safe to call when never connected.
func (p *PostgresProvider) Close() error {
	// Close listener if it exists
	p.mu.Lock()
	if p.listener != nil {
		if err := p.listener.Close(); err != nil {
			// Manual unlock (no defer) so the sql.DB close below runs
			// outside the lock on the success path.
			p.mu.Unlock()
			return fmt.Errorf("failed to close listener: %w", err)
		}
		p.listener = nil
	}
	p.mu.Unlock()

	if p.db == nil {
		return nil
	}

	err := p.db.Close()
	if err != nil {
		return fmt.Errorf("failed to close PostgreSQL connection: %w", err)
	}

	if p.config.GetEnableLogging() {
		logger.Info("PostgreSQL connection closed: name=%s", p.config.GetName())
	}

	p.db = nil
	return nil
}
|
||||
|
||||
// HealthCheck verifies the PostgreSQL connection is alive
|
||||
func (p *PostgresProvider) HealthCheck(ctx context.Context) error {
|
||||
if p.db == nil {
|
||||
return fmt.Errorf("database connection is nil")
|
||||
}
|
||||
|
||||
// Use a short timeout for health checks
|
||||
healthCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := p.db.PingContext(healthCtx); err != nil {
|
||||
return fmt.Errorf("health check failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetNative returns the native *sql.DB connection
|
||||
func (p *PostgresProvider) GetNative() (*sql.DB, error) {
|
||||
if p.db == nil {
|
||||
return nil, fmt.Errorf("database connection is not initialized")
|
||||
}
|
||||
return p.db, nil
|
||||
}
|
||||
|
||||
// GetMongo returns an error for PostgreSQL (not a MongoDB connection)
func (p *PostgresProvider) GetMongo() (*mongo.Client, error) {
	// Sentinel error lets callers detect the mismatch with errors.Is.
	return nil, ErrNotMongoDB
}
|
||||
|
||||
// Stats returns connection pool statistics
|
||||
func (p *PostgresProvider) Stats() *ConnectionStats {
|
||||
if p.db == nil {
|
||||
return &ConnectionStats{
|
||||
Name: p.config.GetName(),
|
||||
Type: "postgres",
|
||||
Connected: false,
|
||||
}
|
||||
}
|
||||
|
||||
stats := p.db.Stats()
|
||||
|
||||
return &ConnectionStats{
|
||||
Name: p.config.GetName(),
|
||||
Type: "postgres",
|
||||
Connected: true,
|
||||
OpenConnections: stats.OpenConnections,
|
||||
InUse: stats.InUse,
|
||||
Idle: stats.Idle,
|
||||
WaitCount: stats.WaitCount,
|
||||
WaitDuration: stats.WaitDuration,
|
||||
MaxIdleClosed: stats.MaxIdleClosed,
|
||||
MaxLifetimeClosed: stats.MaxLifetimeClosed,
|
||||
}
|
||||
}
|
||||
|
||||
// GetListener returns a PostgreSQL listener for NOTIFY/LISTEN functionality
// The listener is lazily initialized on first call and reused for subsequent calls
func (p *PostgresProvider) GetListener(ctx context.Context) (*PostgresListener, error) {
	// mu serializes concurrent first calls so only one listener is created.
	p.mu.Lock()
	defer p.mu.Unlock()

	// Return existing listener if already created
	if p.listener != nil {
		return p.listener, nil
	}

	// Create new listener
	listener := NewPostgresListener(p.config)

	// Connect the listener
	if err := listener.Connect(ctx); err != nil {
		return nil, fmt.Errorf("failed to connect listener: %w", err)
	}

	p.listener = listener
	return p.listener, nil
}
|
||||
|
||||
// calculateBackoff calculates the exponential backoff delay for a retry
// attempt: initial doubled once per attempt, clamped to maxDelay.
func calculateBackoff(attempt int, initial, maxDelay time.Duration) time.Duration {
	backoff := initial * time.Duration(math.Pow(2, float64(attempt)))
	if backoff > maxDelay {
		return maxDelay
	}
	return backoff
}
|
||||
401
pkg/dbmanager/providers/postgres_listener.go
Normal file
401
pkg/dbmanager/providers/postgres_listener.go
Normal file
@@ -0,0 +1,401 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
)
|
||||
|
||||
// NotificationHandler is called when a notification is received
type NotificationHandler func(channel string, payload string)

// PostgresListener manages PostgreSQL LISTEN/NOTIFY functionality
type PostgresListener struct {
	config ConnectionConfig
	// conn is a dedicated pgx connection used for LISTEN/NOTIFY, kept
	// separate from the provider's pooled *sql.DB.
	conn *pgx.Conn

	// Channel subscriptions
	channels map[string]NotificationHandler
	// mu guards conn and channels.
	mu sync.RWMutex

	// Lifecycle management
	ctx    context.Context
	cancel context.CancelFunc
	// closed is set exactly once by Close; guarded by closeMu.
	closed  bool
	closeMu sync.Mutex
	// reconnectC is a 1-buffered signal channel; non-blocking sends coalesce
	// multiple reconnect requests into one.
	reconnectC chan struct{}
}
|
||||
|
||||
// NewPostgresListener creates a new PostgreSQL listener
|
||||
func NewPostgresListener(cfg ConnectionConfig) *PostgresListener {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
return &PostgresListener{
|
||||
config: cfg,
|
||||
channels: make(map[string]NotificationHandler),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
reconnectC: make(chan struct{}, 1),
|
||||
}
|
||||
}
|
||||
|
||||
// Connect establishes a dedicated connection for listening.
//
// It dials a standalone pgx connection with up to three attempts using
// exponential backoff, then starts the background notification and
// reconnection goroutines.
//
// NOTE(review): every successful Connect spawns a fresh pair of background
// goroutines, and handleReconnection calls Connect again after a dropped
// connection, so repeated reconnects accumulate goroutines — confirm and
// consider guarding the `go` calls so they run only once per listener.
func (l *PostgresListener) Connect(ctx context.Context) error {
	dsn, err := l.config.BuildDSN()
	if err != nil {
		return fmt.Errorf("failed to build DSN: %w", err)
	}

	// Parse connection config
	connConfig, err := pgx.ParseConfig(dsn)
	if err != nil {
		return fmt.Errorf("failed to parse connection config: %w", err)
	}

	// Connect with retry logic
	var conn *pgx.Conn
	var lastErr error

	retryAttempts := 3
	retryDelay := 1 * time.Second

	for attempt := 0; attempt < retryAttempts; attempt++ {
		if attempt > 0 {
			// Back off before every attempt after the first.
			delay := calculateBackoff(attempt, retryDelay, 10*time.Second)
			if l.config.GetEnableLogging() {
				logger.Info("Retrying PostgreSQL listener connection: attempt=%d/%d, delay=%v", attempt+1, retryAttempts, delay)
			}

			select {
			case <-time.After(delay):
			case <-ctx.Done():
				return ctx.Err()
			}
		}

		conn, err = pgx.ConnectConfig(ctx, connConfig)
		if err != nil {
			lastErr = err
			if l.config.GetEnableLogging() {
				logger.Warn("Failed to connect PostgreSQL listener", "error", err)
			}
			continue
		}

		// Test the connection
		if err = conn.Ping(ctx); err != nil {
			lastErr = err
			conn.Close(ctx)
			if l.config.GetEnableLogging() {
				logger.Warn("Failed to ping PostgreSQL listener", "error", err)
			}
			continue
		}

		// Connection successful
		break
	}

	// err reflects the final attempt; lastErr preserves the underlying cause.
	if err != nil {
		return fmt.Errorf("failed to connect listener after %d attempts: %w", retryAttempts, lastErr)
	}

	l.mu.Lock()
	l.conn = conn
	l.mu.Unlock()

	// Start notification handler
	go l.handleNotifications()

	// Start reconnection handler
	go l.handleReconnection()

	if l.config.GetEnableLogging() {
		logger.Info("PostgreSQL listener connected: name=%s", l.config.GetName())
	}

	return nil
}
|
||||
|
||||
// Listen subscribes to a PostgreSQL notification channel.
//
// The channel identifier is quoted via pgx.Identifier, so arbitrary names
// are safe. The handler is invoked for every notification received on the
// channel (see handleNotifications) until Unlisten or Close.
func (l *PostgresListener) Listen(channel string, handler NotificationHandler) error {
	// Reject calls after Close. NOTE(review): closed is only checked here,
	// not re-checked under mu, so a Close racing with this call can slip
	// through — confirm whether that window matters.
	l.closeMu.Lock()
	if l.closed {
		l.closeMu.Unlock()
		return fmt.Errorf("listener is closed")
	}
	l.closeMu.Unlock()

	l.mu.Lock()
	defer l.mu.Unlock()

	if l.conn == nil {
		return fmt.Errorf("listener connection is not initialized")
	}

	// Execute LISTEN command
	_, err := l.conn.Exec(l.ctx, fmt.Sprintf("LISTEN %s", pgx.Identifier{channel}.Sanitize()))
	if err != nil {
		return fmt.Errorf("failed to listen on channel %s: %w", channel, err)
	}

	// Store the handler
	l.channels[channel] = handler

	if l.config.GetEnableLogging() {
		logger.Info("Listening on channel: name=%s, channel=%s", l.config.GetName(), channel)
	}

	return nil
}
|
||||
|
||||
// Unlisten unsubscribes from a PostgreSQL notification channel.
//
// Issues UNLISTEN (identifier safely quoted via pgx.Identifier) and then
// removes the stored handler.
func (l *PostgresListener) Unlisten(channel string) error {
	// Reject calls after Close.
	l.closeMu.Lock()
	if l.closed {
		l.closeMu.Unlock()
		return fmt.Errorf("listener is closed")
	}
	l.closeMu.Unlock()

	l.mu.Lock()
	defer l.mu.Unlock()

	if l.conn == nil {
		return fmt.Errorf("listener connection is not initialized")
	}

	// Execute UNLISTEN command
	_, err := l.conn.Exec(l.ctx, fmt.Sprintf("UNLISTEN %s", pgx.Identifier{channel}.Sanitize()))
	if err != nil {
		return fmt.Errorf("failed to unlisten from channel %s: %w", channel, err)
	}

	// Remove the handler
	delete(l.channels, channel)

	if l.config.GetEnableLogging() {
		logger.Info("Unlistened from channel: name=%s, channel=%s", l.config.GetName(), channel)
	}

	return nil
}
|
||||
|
||||
// Notify sends a notification to a PostgreSQL channel
|
||||
func (l *PostgresListener) Notify(ctx context.Context, channel string, payload string) error {
|
||||
l.closeMu.Lock()
|
||||
if l.closed {
|
||||
l.closeMu.Unlock()
|
||||
return fmt.Errorf("listener is closed")
|
||||
}
|
||||
l.closeMu.Unlock()
|
||||
|
||||
l.mu.RLock()
|
||||
conn := l.conn
|
||||
l.mu.RUnlock()
|
||||
|
||||
if conn == nil {
|
||||
return fmt.Errorf("listener connection is not initialized")
|
||||
}
|
||||
|
||||
// Execute NOTIFY command
|
||||
_, err := conn.Exec(ctx, "SELECT pg_notify($1, $2)", channel, payload)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to notify channel %s: %w", channel, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the listener and all subscriptions.
//
// Idempotent: subsequent calls return nil immediately. Cancels the
// background goroutines, best-effort UNLISTENs every subscribed channel,
// then closes the dedicated connection.
func (l *PostgresListener) Close() error {
	// Flip closed exactly once under closeMu.
	l.closeMu.Lock()
	if l.closed {
		l.closeMu.Unlock()
		return nil
	}
	l.closed = true
	l.closeMu.Unlock()

	// Cancel context to stop background goroutines
	l.cancel()

	l.mu.Lock()
	defer l.mu.Unlock()

	if l.conn == nil {
		return nil
	}

	// Unlisten from all channels. Errors are deliberately ignored: the
	// connection is being torn down anyway.
	for channel := range l.channels {
		_, _ = l.conn.Exec(context.Background(), fmt.Sprintf("UNLISTEN %s", pgx.Identifier{channel}.Sanitize()))
	}

	// Close connection
	err := l.conn.Close(context.Background())
	if err != nil {
		return fmt.Errorf("failed to close listener connection: %w", err)
	}

	l.conn = nil
	l.channels = make(map[string]NotificationHandler)

	if l.config.GetEnableLogging() {
		logger.Info("PostgreSQL listener closed: name=%s", l.config.GetName())
	}

	return nil
}
|
||||
|
||||
// handleNotifications processes incoming notifications.
//
// Runs as a background goroutine for the lifetime of the listener. It
// repeatedly waits for a notification (with a 5s timeout so cancellation
// and shutdown are noticed promptly) and dispatches each one to the
// registered handler for its channel, if any.
func (l *PostgresListener) handleNotifications() {
	for {
		select {
		case <-l.ctx.Done():
			return
		default:
		}

		l.mu.RLock()
		conn := l.conn
		l.mu.RUnlock()

		if conn == nil {
			// Connection not available, wait for reconnection
			time.Sleep(100 * time.Millisecond)
			continue
		}

		// Wait for notification with timeout
		ctx, cancel := context.WithTimeout(l.ctx, 5*time.Second)
		notification, err := conn.WaitForNotification(ctx)
		cancel()

		if err != nil {
			// Check if context was cancelled
			if l.ctx.Err() != nil {
				return
			}

			// Check if it's a connection error
			if pgconn.Timeout(err) {
				// Timeout is normal, continue waiting
				continue
			}

			// Connection error, trigger reconnection
			if l.config.GetEnableLogging() {
				logger.Warn("Notification error, triggering reconnection", "error", err)
			}
			// Non-blocking send: reconnectC has capacity 1, so repeated
			// failures coalesce into a single reconnect request.
			select {
			case l.reconnectC <- struct{}{}:
			default:
			}
			time.Sleep(1 * time.Second)
			continue
		}

		// Process notification
		l.mu.RLock()
		handler, exists := l.channels[notification.Channel]
		l.mu.RUnlock()

		if exists && handler != nil {
			// Call handler in a goroutine to avoid blocking
			go func(ch, payload string) {
				// A panicking user handler must not kill the process.
				defer func() {
					if r := recover(); r != nil {
						if l.config.GetEnableLogging() {
							logger.Error("Notification handler panic: channel=%s, error=%v", ch, r)
						}
					}
				}()
				handler(ch, payload)
			}(notification.Channel, notification.Payload)
		}
	}
}
|
||||
|
||||
// handleReconnection manages automatic reconnection
|
||||
func (l *PostgresListener) handleReconnection() {
|
||||
for {
|
||||
select {
|
||||
case <-l.ctx.Done():
|
||||
return
|
||||
case <-l.reconnectC:
|
||||
if l.config.GetEnableLogging() {
|
||||
logger.Info("Attempting to reconnect listener: name=%s", l.config.GetName())
|
||||
}
|
||||
|
||||
// Close existing connection
|
||||
l.mu.Lock()
|
||||
if l.conn != nil {
|
||||
l.conn.Close(context.Background())
|
||||
l.conn = nil
|
||||
}
|
||||
|
||||
// Save current subscriptions
|
||||
channels := make(map[string]NotificationHandler)
|
||||
for ch, handler := range l.channels {
|
||||
channels[ch] = handler
|
||||
}
|
||||
l.mu.Unlock()
|
||||
|
||||
// Attempt reconnection
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
err := l.Connect(ctx)
|
||||
cancel()
|
||||
|
||||
if err != nil {
|
||||
if l.config.GetEnableLogging() {
|
||||
logger.Error("Failed to reconnect listener: name=%s, error=%v", l.config.GetName(), err)
|
||||
}
|
||||
// Retry after delay
|
||||
time.Sleep(5 * time.Second)
|
||||
select {
|
||||
case l.reconnectC <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Resubscribe to all channels
|
||||
for channel, handler := range channels {
|
||||
if err := l.Listen(channel, handler); err != nil {
|
||||
if l.config.GetEnableLogging() {
|
||||
logger.Error("Failed to resubscribe to channel: name=%s, channel=%s, error=%v", l.config.GetName(), channel, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if l.config.GetEnableLogging() {
|
||||
logger.Info("Listener reconnected successfully: name=%s", l.config.GetName())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// IsConnected returns true if the listener is connected
|
||||
func (l *PostgresListener) IsConnected() bool {
|
||||
l.mu.RLock()
|
||||
defer l.mu.RUnlock()
|
||||
return l.conn != nil
|
||||
}
|
||||
|
||||
// Channels returns the list of channels currently being listened to
|
||||
func (l *PostgresListener) Channels() []string {
|
||||
l.mu.RLock()
|
||||
defer l.mu.RUnlock()
|
||||
|
||||
channels := make([]string, 0, len(l.channels))
|
||||
for ch := range l.channels {
|
||||
channels = append(channels, ch)
|
||||
}
|
||||
return channels
|
||||
}
|
||||
228
pkg/dbmanager/providers/postgres_listener_example_test.go
Normal file
228
pkg/dbmanager/providers/postgres_listener_example_test.go
Normal file
@@ -0,0 +1,228 @@
|
||||
package providers_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/dbmanager"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/dbmanager/providers"
|
||||
)
|
||||
|
||||
// ExamplePostgresListener_basic demonstrates basic LISTEN/NOTIFY usage.
// Requires a reachable PostgreSQL server; panics on any failure since this
// is illustrative example code, not production error handling.
func ExamplePostgresListener_basic() {
	// Create a connection config
	cfg := &dbmanager.ConnectionConfig{
		Name:           "example",
		Type:           dbmanager.DatabaseTypePostgreSQL,
		Host:           "localhost",
		Port:           5432,
		User:           "postgres",
		Password:       "password",
		Database:       "testdb",
		ConnectTimeout: 10 * time.Second,
		EnableLogging:  true,
	}

	// Create and connect PostgreSQL provider
	provider := providers.NewPostgresProvider()
	ctx := context.Background()

	if err := provider.Connect(ctx, cfg); err != nil {
		panic(fmt.Sprintf("Failed to connect: %v", err))
	}
	defer provider.Close()

	// Get listener (lazily created and cached by the provider)
	listener, err := provider.GetListener(ctx)
	if err != nil {
		panic(fmt.Sprintf("Failed to get listener: %v", err))
	}

	// Subscribe to a channel with a handler
	err = listener.Listen("user_events", func(channel, payload string) {
		fmt.Printf("Received notification on %s: %s\n", channel, payload)
	})
	if err != nil {
		panic(fmt.Sprintf("Failed to listen: %v", err))
	}

	// Send a notification
	err = listener.Notify(ctx, "user_events", `{"event":"user_created","user_id":123}`)
	if err != nil {
		panic(fmt.Sprintf("Failed to notify: %v", err))
	}

	// Wait for notification to be processed (delivery is asynchronous)
	time.Sleep(100 * time.Millisecond)

	// Unsubscribe from the channel
	if err := listener.Unlisten("user_events"); err != nil {
		panic(fmt.Sprintf("Failed to unlisten: %v", err))
	}
}
|
||||
|
||||
// ExamplePostgresListener_multipleChannels demonstrates listening to multiple channels
|
||||
func ExamplePostgresListener_multipleChannels() {
|
||||
cfg := &dbmanager.ConnectionConfig{
|
||||
Name: "example",
|
||||
Type: dbmanager.DatabaseTypePostgreSQL,
|
||||
Host: "localhost",
|
||||
Port: 5432,
|
||||
User: "postgres",
|
||||
Password: "password",
|
||||
Database: "testdb",
|
||||
ConnectTimeout: 10 * time.Second,
|
||||
EnableLogging: false,
|
||||
}
|
||||
|
||||
provider := providers.NewPostgresProvider()
|
||||
ctx := context.Background()
|
||||
|
||||
if err := provider.Connect(ctx, cfg); err != nil {
|
||||
panic(fmt.Sprintf("Failed to connect: %v", err))
|
||||
}
|
||||
defer provider.Close()
|
||||
|
||||
listener, err := provider.GetListener(ctx)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to get listener: %v", err))
|
||||
}
|
||||
|
||||
// Listen to multiple channels
|
||||
channels := []string{"orders", "payments", "notifications"}
|
||||
for _, ch := range channels {
|
||||
channel := ch // Capture for closure
|
||||
err := listener.Listen(channel, func(ch, payload string) {
|
||||
fmt.Printf("[%s] %s\n", ch, payload)
|
||||
})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to listen on %s: %v", channel, err))
|
||||
}
|
||||
}
|
||||
|
||||
// Send notifications to different channels
|
||||
listener.Notify(ctx, "orders", "New order #12345")
|
||||
listener.Notify(ctx, "payments", "Payment received $99.99")
|
||||
listener.Notify(ctx, "notifications", "Welcome email sent")
|
||||
|
||||
// Wait for notifications
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
|
||||
// Check active channels
|
||||
activeChannels := listener.Channels()
|
||||
fmt.Printf("Listening to %d channels: %v\n", len(activeChannels), activeChannels)
|
||||
}
|
||||
|
||||
// ExamplePostgresListener_withDBManager demonstrates usage with DBManager
|
||||
func ExamplePostgresListener_withDBManager() {
|
||||
// This example shows how to use the listener with the full DBManager
|
||||
|
||||
// Assume we have a DBManager instance and get a connection
|
||||
// conn, _ := dbMgr.Get("primary")
|
||||
|
||||
// Get the underlying provider (this would need to be exposed via the Connection interface)
|
||||
// For now, this is a conceptual example
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create provider directly for demonstration
|
||||
cfg := &dbmanager.ConnectionConfig{
|
||||
Name: "primary",
|
||||
Type: dbmanager.DatabaseTypePostgreSQL,
|
||||
Host: "localhost",
|
||||
Port: 5432,
|
||||
User: "postgres",
|
||||
Password: "password",
|
||||
Database: "myapp",
|
||||
ConnectTimeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
provider := providers.NewPostgresProvider()
|
||||
if err := provider.Connect(ctx, cfg); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer provider.Close()
|
||||
|
||||
// Get listener
|
||||
listener, err := provider.GetListener(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Subscribe to application events
|
||||
listener.Listen("cache_invalidation", func(channel, payload string) {
|
||||
fmt.Printf("Cache invalidation request: %s\n", payload)
|
||||
// Handle cache invalidation logic here
|
||||
})
|
||||
|
||||
listener.Listen("config_reload", func(channel, payload string) {
|
||||
fmt.Printf("Configuration reload request: %s\n", payload)
|
||||
// Handle configuration reload logic here
|
||||
})
|
||||
|
||||
// Simulate receiving notifications
|
||||
listener.Notify(ctx, "cache_invalidation", "user:123")
|
||||
listener.Notify(ctx, "config_reload", "database")
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
||||
// ExamplePostgresListener_errorHandling demonstrates error handling and reconnection
|
||||
func ExamplePostgresListener_errorHandling() {
|
||||
cfg := &dbmanager.ConnectionConfig{
|
||||
Name: "example",
|
||||
Type: dbmanager.DatabaseTypePostgreSQL,
|
||||
Host: "localhost",
|
||||
Port: 5432,
|
||||
User: "postgres",
|
||||
Password: "password",
|
||||
Database: "testdb",
|
||||
ConnectTimeout: 10 * time.Second,
|
||||
EnableLogging: true,
|
||||
}
|
||||
|
||||
provider := providers.NewPostgresProvider()
|
||||
ctx := context.Background()
|
||||
|
||||
if err := provider.Connect(ctx, cfg); err != nil {
|
||||
panic(fmt.Sprintf("Failed to connect: %v", err))
|
||||
}
|
||||
defer provider.Close()
|
||||
|
||||
listener, err := provider.GetListener(ctx)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to get listener: %v", err))
|
||||
}
|
||||
|
||||
// The listener automatically reconnects if the connection is lost
|
||||
// Subscribe with error handling in the callback
|
||||
err = listener.Listen("critical_events", func(channel, payload string) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
fmt.Printf("Handler panic recovered: %v\n", r)
|
||||
}
|
||||
}()
|
||||
|
||||
// Process the event
|
||||
fmt.Printf("Processing critical event: %s\n", payload)
|
||||
|
||||
// If processing fails, the panic will be caught by the defer above
|
||||
// The listener will continue to function normally
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to listen: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if listener is connected
|
||||
if listener.IsConnected() {
|
||||
fmt.Println("Listener is connected and ready")
|
||||
}
|
||||
|
||||
// Send a notification
|
||||
listener.Notify(ctx, "critical_events", "system_alert")
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
83
pkg/dbmanager/providers/provider.go
Normal file
83
pkg/dbmanager/providers/provider.go
Normal file
@@ -0,0 +1,83 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
)
|
||||
|
||||
// Common errors
|
||||
var (
|
||||
// ErrNotSQLDatabase is returned when attempting SQL operations on a non-SQL database
|
||||
ErrNotSQLDatabase = errors.New("not a SQL database")
|
||||
|
||||
// ErrNotMongoDB is returned when attempting MongoDB operations on a non-MongoDB connection
|
||||
ErrNotMongoDB = errors.New("not a MongoDB connection")
|
||||
)
|
||||
|
||||
// ConnectionStats contains statistics about a database connection
|
||||
type ConnectionStats struct {
|
||||
Name string
|
||||
Type string // Database type as string to avoid circular dependency
|
||||
Connected bool
|
||||
LastHealthCheck time.Time
|
||||
HealthCheckStatus string
|
||||
|
||||
// SQL connection pool stats
|
||||
OpenConnections int
|
||||
InUse int
|
||||
Idle int
|
||||
WaitCount int64
|
||||
WaitDuration time.Duration
|
||||
MaxIdleClosed int64
|
||||
MaxLifetimeClosed int64
|
||||
}
|
||||
|
||||
// ConnectionConfig is a minimal interface for configuration
|
||||
// The actual implementation is in dbmanager package
|
||||
type ConnectionConfig interface {
|
||||
BuildDSN() (string, error)
|
||||
GetName() string
|
||||
GetType() string
|
||||
GetHost() string
|
||||
GetPort() int
|
||||
GetUser() string
|
||||
GetPassword() string
|
||||
GetDatabase() string
|
||||
GetFilePath() string
|
||||
GetConnectTimeout() time.Duration
|
||||
GetQueryTimeout() time.Duration
|
||||
GetEnableLogging() bool
|
||||
GetEnableMetrics() bool
|
||||
GetMaxOpenConns() *int
|
||||
GetMaxIdleConns() *int
|
||||
GetConnMaxLifetime() *time.Duration
|
||||
GetConnMaxIdleTime() *time.Duration
|
||||
GetReadPreference() string
|
||||
}
|
||||
|
||||
// Provider creates and manages the underlying database connection
|
||||
type Provider interface {
|
||||
// Connect establishes the database connection
|
||||
Connect(ctx context.Context, cfg ConnectionConfig) error
|
||||
|
||||
// Close closes the connection
|
||||
Close() error
|
||||
|
||||
// HealthCheck verifies the connection is alive
|
||||
HealthCheck(ctx context.Context) error
|
||||
|
||||
// GetNative returns the native *sql.DB (SQL databases only)
|
||||
// Returns an error for non-SQL databases
|
||||
GetNative() (*sql.DB, error)
|
||||
|
||||
// GetMongo returns the MongoDB client (MongoDB only)
|
||||
// Returns an error for non-MongoDB databases
|
||||
GetMongo() (*mongo.Client, error)
|
||||
|
||||
// Stats returns connection statistics
|
||||
Stats() *ConnectionStats
|
||||
}
|
||||
177
pkg/dbmanager/providers/sqlite.go
Normal file
177
pkg/dbmanager/providers/sqlite.go
Normal file
@@ -0,0 +1,177 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
_ "github.com/glebarez/sqlite" // Pure Go SQLite driver
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
)
|
||||
|
||||
// SQLiteProvider implements Provider for SQLite databases
|
||||
type SQLiteProvider struct {
|
||||
db *sql.DB
|
||||
config ConnectionConfig
|
||||
}
|
||||
|
||||
// NewSQLiteProvider creates a new SQLite provider
|
||||
func NewSQLiteProvider() *SQLiteProvider {
|
||||
return &SQLiteProvider{}
|
||||
}
|
||||
|
||||
// Connect establishes a SQLite connection
|
||||
func (p *SQLiteProvider) Connect(ctx context.Context, cfg ConnectionConfig) error {
|
||||
// Build DSN
|
||||
dsn, err := cfg.BuildDSN()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to build DSN: %w", err)
|
||||
}
|
||||
|
||||
// Open database connection
|
||||
db, err := sql.Open("sqlite", dsn)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open SQLite connection: %w", err)
|
||||
}
|
||||
|
||||
// Test the connection with context timeout
|
||||
connectCtx, cancel := context.WithTimeout(ctx, cfg.GetConnectTimeout())
|
||||
err = db.PingContext(connectCtx)
|
||||
cancel()
|
||||
|
||||
if err != nil {
|
||||
db.Close()
|
||||
return fmt.Errorf("failed to ping SQLite database: %w", err)
|
||||
}
|
||||
|
||||
// Configure connection pool
|
||||
// Note: SQLite works best with MaxOpenConns=1 for write operations
|
||||
// but can handle multiple readers
|
||||
if cfg.GetMaxOpenConns() != nil {
|
||||
db.SetMaxOpenConns(*cfg.GetMaxOpenConns())
|
||||
} else {
|
||||
// Default to 1 for SQLite to avoid "database is locked" errors
|
||||
db.SetMaxOpenConns(1)
|
||||
}
|
||||
|
||||
if cfg.GetMaxIdleConns() != nil {
|
||||
db.SetMaxIdleConns(*cfg.GetMaxIdleConns())
|
||||
}
|
||||
if cfg.GetConnMaxLifetime() != nil {
|
||||
db.SetConnMaxLifetime(*cfg.GetConnMaxLifetime())
|
||||
}
|
||||
if cfg.GetConnMaxIdleTime() != nil {
|
||||
db.SetConnMaxIdleTime(*cfg.GetConnMaxIdleTime())
|
||||
}
|
||||
|
||||
// Enable WAL mode for better concurrent access
|
||||
_, err = db.ExecContext(ctx, "PRAGMA journal_mode=WAL")
|
||||
if err != nil {
|
||||
if cfg.GetEnableLogging() {
|
||||
logger.Warn("Failed to enable WAL mode for SQLite", "error", err)
|
||||
}
|
||||
// Don't fail connection if WAL mode cannot be enabled
|
||||
}
|
||||
|
||||
// Set busy timeout to handle locked database
|
||||
_, err = db.ExecContext(ctx, "PRAGMA busy_timeout=5000")
|
||||
if err != nil {
|
||||
if cfg.GetEnableLogging() {
|
||||
logger.Warn("Failed to set busy timeout for SQLite", "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
p.db = db
|
||||
p.config = cfg
|
||||
|
||||
if cfg.GetEnableLogging() {
|
||||
logger.Info("SQLite connection established: name=%s, filepath=%s", cfg.GetName(), cfg.GetFilePath())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the SQLite connection
|
||||
func (p *SQLiteProvider) Close() error {
|
||||
if p.db == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := p.db.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to close SQLite connection: %w", err)
|
||||
}
|
||||
|
||||
if p.config.GetEnableLogging() {
|
||||
logger.Info("SQLite connection closed: name=%s", p.config.GetName())
|
||||
}
|
||||
|
||||
p.db = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// HealthCheck verifies the SQLite connection is alive
|
||||
func (p *SQLiteProvider) HealthCheck(ctx context.Context) error {
|
||||
if p.db == nil {
|
||||
return fmt.Errorf("database connection is nil")
|
||||
}
|
||||
|
||||
// Use a short timeout for health checks
|
||||
healthCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Execute a simple query to verify the database is accessible
|
||||
var result int
|
||||
err := p.db.QueryRowContext(healthCtx, "SELECT 1").Scan(&result)
|
||||
if err != nil {
|
||||
return fmt.Errorf("health check failed: %w", err)
|
||||
}
|
||||
|
||||
if result != 1 {
|
||||
return fmt.Errorf("health check returned unexpected result: %d", result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetNative returns the native *sql.DB connection
|
||||
func (p *SQLiteProvider) GetNative() (*sql.DB, error) {
|
||||
if p.db == nil {
|
||||
return nil, fmt.Errorf("database connection is not initialized")
|
||||
}
|
||||
return p.db, nil
|
||||
}
|
||||
|
||||
// GetMongo returns an error for SQLite (not a MongoDB connection)
|
||||
func (p *SQLiteProvider) GetMongo() (*mongo.Client, error) {
|
||||
return nil, ErrNotMongoDB
|
||||
}
|
||||
|
||||
// Stats returns connection pool statistics
|
||||
func (p *SQLiteProvider) Stats() *ConnectionStats {
|
||||
if p.db == nil {
|
||||
return &ConnectionStats{
|
||||
Name: p.config.GetName(),
|
||||
Type: "sqlite",
|
||||
Connected: false,
|
||||
}
|
||||
}
|
||||
|
||||
stats := p.db.Stats()
|
||||
|
||||
return &ConnectionStats{
|
||||
Name: p.config.GetName(),
|
||||
Type: "sqlite",
|
||||
Connected: true,
|
||||
OpenConnections: stats.OpenConnections,
|
||||
InUse: stats.InUse,
|
||||
Idle: stats.Idle,
|
||||
WaitCount: stats.WaitCount,
|
||||
WaitDuration: stats.WaitDuration,
|
||||
MaxIdleClosed: stats.MaxIdleClosed,
|
||||
MaxLifetimeClosed: stats.MaxLifetimeClosed,
|
||||
}
|
||||
}
|
||||
@@ -84,7 +84,7 @@ func (h *Handler) SqlQueryList(sqlquery string, options SqlQueryOptions) HTTPFun
|
||||
// Create local copy to avoid modifying the captured parameter across requests
|
||||
sqlquery := sqlquery
|
||||
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 900*time.Second)
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 15*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
var dbobjlist []map[string]interface{}
|
||||
@@ -423,7 +423,7 @@ func (h *Handler) SqlQuery(sqlquery string, options SqlQueryOptions) HTTPFuncTyp
|
||||
// Create local copy to avoid modifying the captured parameter across requests
|
||||
sqlquery := sqlquery
|
||||
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 600*time.Second)
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 15*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
propQry := make(map[string]string)
|
||||
@@ -522,10 +522,17 @@ func (h *Handler) SqlQuery(sqlquery string, options SqlQueryOptions) HTTPFuncTyp
|
||||
if strings.HasPrefix(kLower, "x-fieldfilter-") {
|
||||
colname := strings.ReplaceAll(kLower, "x-fieldfilter-", "")
|
||||
if strings.Contains(strings.ToLower(sqlquery), colname) {
|
||||
if val == "" || val == "0" {
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("COALESCE(%s, 0) = %s", ValidSQL(colname, "colname"), ValidSQL(val, "colvalue")))
|
||||
} else {
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("%s = %s", ValidSQL(colname, "colname"), ValidSQL(val, "colvalue")))
|
||||
switch val {
|
||||
case "0":
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("COALESCE(%s, 0) = 0", ValidSQL(colname, "colname")))
|
||||
case "":
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("(%[1]s = '' OR %[1]s IS NULL)", ValidSQL(colname, "colname")))
|
||||
default:
|
||||
if IsNumeric(val) {
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("%s = %s", ValidSQL(colname, "colname"), ValidSQL(val, "colvalue")))
|
||||
} else {
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("%s = '%s'", ValidSQL(colname, "colname"), ValidSQL(val, "colvalue")))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -662,7 +669,10 @@ func (h *Handler) mergePathParams(r *http.Request, sqlquery string, variables ma
|
||||
for k, v := range pathVars {
|
||||
kword := fmt.Sprintf("[%s]", k)
|
||||
if strings.Contains(sqlquery, kword) {
|
||||
sqlquery = strings.ReplaceAll(sqlquery, kword, fmt.Sprintf("%v", v))
|
||||
// Sanitize the value before replacing
|
||||
vStr := fmt.Sprintf("%v", v)
|
||||
sanitized := ValidSQL(vStr, "colvalue")
|
||||
sqlquery = strings.ReplaceAll(sqlquery, kword, sanitized)
|
||||
}
|
||||
variables[k] = v
|
||||
|
||||
@@ -690,7 +700,9 @@ func (h *Handler) mergeQueryParams(r *http.Request, sqlquery string, variables m
|
||||
// Replace in SQL if placeholder exists
|
||||
if strings.Contains(sqlquery, kword) && len(val) > 0 {
|
||||
if strings.HasPrefix(parmk, "p-") {
|
||||
sqlquery = strings.ReplaceAll(sqlquery, kword, val)
|
||||
// Sanitize the parameter value before replacing
|
||||
sanitized := ValidSQL(val, "colvalue")
|
||||
sqlquery = strings.ReplaceAll(sqlquery, kword, sanitized)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -702,15 +714,36 @@ func (h *Handler) mergeQueryParams(r *http.Request, sqlquery string, variables m
|
||||
// Apply filters if allowed
|
||||
if allowFilter && len(parmk) > 1 && strings.Contains(strings.ToLower(sqlquery), strings.ToLower(parmk)) {
|
||||
if len(parmv) > 1 {
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("%s IN (%s)", ValidSQL(parmk, "colname"), strings.Join(parmv, ",")))
|
||||
// Sanitize each value in the IN clause with appropriate quoting
|
||||
sanitizedValues := make([]string, len(parmv))
|
||||
for i, v := range parmv {
|
||||
if IsNumeric(v) {
|
||||
// Numeric values don't need quotes
|
||||
sanitizedValues[i] = ValidSQL(v, "colvalue")
|
||||
} else {
|
||||
// String values need quotes
|
||||
sanitized := ValidSQL(v, "colvalue")
|
||||
sanitizedValues[i] = fmt.Sprintf("'%s'", sanitized)
|
||||
}
|
||||
}
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("%s IN (%s)", ValidSQL(parmk, "colname"), strings.Join(sanitizedValues, ",")))
|
||||
} else {
|
||||
if strings.Contains(val, "match=") {
|
||||
colval := strings.ReplaceAll(val, "match=", "")
|
||||
// Escape single quotes and backslashes for LIKE patterns
|
||||
// But don't escape wildcards % and _ which are intentional
|
||||
colval = strings.ReplaceAll(colval, "\\", "\\\\")
|
||||
colval = strings.ReplaceAll(colval, "'", "''")
|
||||
if colval != "*" {
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("%s ILIKE '%%%s%%'", ValidSQL(parmk, "colname"), ValidSQL(colval, "colvalue")))
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("%s ILIKE '%%%s%%'", ValidSQL(parmk, "colname"), colval))
|
||||
}
|
||||
} else if val == "" || val == "0" {
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("(%[1]s = %[2]s OR %[1]s IS NULL)", ValidSQL(parmk, "colname"), ValidSQL(val, "colvalue")))
|
||||
// For empty/zero values, treat as literal 0 or empty string with quotes
|
||||
if val == "0" {
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("(%[1]s = 0 OR %[1]s IS NULL)", ValidSQL(parmk, "colname")))
|
||||
} else {
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("(%[1]s = '' OR %[1]s IS NULL)", ValidSQL(parmk, "colname")))
|
||||
}
|
||||
} else {
|
||||
if IsNumeric(val) {
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("%s = %s", ValidSQL(parmk, "colname"), ValidSQL(val, "colvalue")))
|
||||
@@ -743,16 +776,25 @@ func (h *Handler) mergeHeaderParams(r *http.Request, sqlquery string, variables
|
||||
|
||||
kword := fmt.Sprintf("[%s]", k)
|
||||
if strings.Contains(sqlquery, kword) {
|
||||
sqlquery = strings.ReplaceAll(sqlquery, kword, val)
|
||||
// Sanitize the header value before replacing
|
||||
sanitized := ValidSQL(val, "colvalue")
|
||||
sqlquery = strings.ReplaceAll(sqlquery, kword, sanitized)
|
||||
}
|
||||
|
||||
// Handle special headers
|
||||
if strings.Contains(k, "x-fieldfilter-") {
|
||||
colname := strings.ReplaceAll(k, "x-fieldfilter-", "")
|
||||
if val == "" || val == "0" {
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("COALESCE(%s, 0) = %s", ValidSQL(colname, "colname"), ValidSQL(val, "colvalue")))
|
||||
} else {
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("%s = %s", ValidSQL(colname, "colname"), ValidSQL(val, "colvalue")))
|
||||
switch val {
|
||||
case "0":
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("COALESCE(%s, 0) = 0", ValidSQL(colname, "colname")))
|
||||
case "":
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("(%[1]s = '' OR %[1]s IS NULL)", ValidSQL(colname, "colname")))
|
||||
default:
|
||||
if IsNumeric(val) {
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("%s = %s", ValidSQL(colname, "colname"), ValidSQL(val, "colvalue")))
|
||||
} else {
|
||||
sqlquery = sqlQryWhere(sqlquery, fmt.Sprintf("%s = '%s'", ValidSQL(colname, "colname"), ValidSQL(val, "colvalue")))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -782,12 +824,15 @@ func (h *Handler) mergeHeaderParams(r *http.Request, sqlquery string, variables
|
||||
func (h *Handler) replaceMetaVariables(sqlquery string, r *http.Request, userCtx *security.UserContext, metainfo map[string]interface{}, variables map[string]interface{}) string {
|
||||
if strings.Contains(sqlquery, "[p_meta_default]") {
|
||||
data, _ := json.Marshal(metainfo)
|
||||
sqlquery = strings.ReplaceAll(sqlquery, "[p_meta_default]", fmt.Sprintf("'%s'::jsonb", string(data)))
|
||||
dataStr := strings.ReplaceAll(string(data), "$META$", "/*META*/")
|
||||
sqlquery = strings.ReplaceAll(sqlquery, "[p_meta_default]", fmt.Sprintf("$META$%s$META$::jsonb", dataStr))
|
||||
}
|
||||
|
||||
if strings.Contains(sqlquery, "[json_variables]") {
|
||||
data, _ := json.Marshal(variables)
|
||||
sqlquery = strings.ReplaceAll(sqlquery, "[json_variables]", fmt.Sprintf("'%s'::jsonb", string(data)))
|
||||
dataStr := strings.ReplaceAll(string(data), "$VAR$", "/*VAR*/")
|
||||
|
||||
sqlquery = strings.ReplaceAll(sqlquery, "[json_variables]", fmt.Sprintf("$VAR$%s$VAR$::jsonb", dataStr))
|
||||
}
|
||||
|
||||
if strings.Contains(sqlquery, "[rid_user]") {
|
||||
@@ -795,7 +840,7 @@ func (h *Handler) replaceMetaVariables(sqlquery string, r *http.Request, userCtx
|
||||
}
|
||||
|
||||
if strings.Contains(sqlquery, "[user]") {
|
||||
sqlquery = strings.ReplaceAll(sqlquery, "[user]", fmt.Sprintf("'%s'", userCtx.UserName))
|
||||
sqlquery = strings.ReplaceAll(sqlquery, "[user]", fmt.Sprintf("$USR$%s$USR$", strings.ReplaceAll(userCtx.UserName, "$USR$", "/*USR*/")))
|
||||
}
|
||||
|
||||
if strings.Contains(sqlquery, "[rid_session]") {
|
||||
@@ -806,7 +851,7 @@ func (h *Handler) replaceMetaVariables(sqlquery string, r *http.Request, userCtx
|
||||
}
|
||||
|
||||
if strings.Contains(sqlquery, "[method]") {
|
||||
sqlquery = strings.ReplaceAll(sqlquery, "[method]", r.Method)
|
||||
sqlquery = strings.ReplaceAll(sqlquery, "[method]", fmt.Sprintf("$M$%s$M$", strings.ReplaceAll(r.Method, "$M$", "/*M*/")))
|
||||
}
|
||||
|
||||
if strings.Contains(sqlquery, "[post_body]") {
|
||||
@@ -819,7 +864,7 @@ func (h *Handler) replaceMetaVariables(sqlquery string, r *http.Request, userCtx
|
||||
}
|
||||
}
|
||||
}
|
||||
sqlquery = strings.ReplaceAll(sqlquery, "[post_body]", fmt.Sprintf("'%s'", bodystr))
|
||||
sqlquery = strings.ReplaceAll(sqlquery, "[post_body]", fmt.Sprintf("$PBODY$%s$PBODY$", strings.ReplaceAll(bodystr, "$PBODY$", "/*PBODY*/")))
|
||||
}
|
||||
|
||||
return sqlquery
|
||||
@@ -859,19 +904,23 @@ func ValidSQL(input, mode string) string {
|
||||
reg := regexp.MustCompile(`[^a-zA-Z0-9_\.]`)
|
||||
return reg.ReplaceAllString(input, "")
|
||||
case "colvalue":
|
||||
// For column values, escape single quotes
|
||||
return strings.ReplaceAll(input, "'", "''")
|
||||
// For column values, escape single quotes and backslashes
|
||||
// Note: Backslashes must be escaped first, then single quotes
|
||||
result := strings.ReplaceAll(input, "\\", "\\\\")
|
||||
result = strings.ReplaceAll(result, "'", "''")
|
||||
return result
|
||||
case "select":
|
||||
// For SELECT clauses, be more permissive but still safe
|
||||
// Remove semicolons and common SQL injection patterns
|
||||
dangerous := []string{";", "--", "/*", "*/", "xp_", "sp_", "DROP ", "DELETE ", "TRUNCATE ", "UPDATE ", "INSERT "}
|
||||
result := input
|
||||
for _, d := range dangerous {
|
||||
result = strings.ReplaceAll(result, d, "")
|
||||
result = strings.ReplaceAll(result, strings.ToLower(d), "")
|
||||
result = strings.ReplaceAll(result, strings.ToUpper(d), "")
|
||||
// Remove semicolons and common SQL injection patterns (case-insensitive)
|
||||
dangerous := []string{
|
||||
";", "--", "/\\*", "\\*/", "xp_", "sp_",
|
||||
"drop ", "delete ", "truncate ", "update ", "insert ",
|
||||
"exec ", "execute ", "union ", "declare ", "alter ", "create ",
|
||||
}
|
||||
return result
|
||||
// Build a single regex pattern with all dangerous keywords
|
||||
pattern := "(?i)(" + strings.Join(dangerous, "|") + ")"
|
||||
re := regexp.MustCompile(pattern)
|
||||
return re.ReplaceAllString(input, "")
|
||||
default:
|
||||
return input
|
||||
}
|
||||
|
||||
@@ -7,8 +7,8 @@ A pluggable metrics collection system with Prometheus implementation.
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/metrics"
|
||||
|
||||
// Initialize Prometheus provider
|
||||
provider := metrics.NewPrometheusProvider()
|
||||
// Initialize Prometheus provider with default config
|
||||
provider := metrics.NewPrometheusProvider(nil)
|
||||
metrics.SetProvider(provider)
|
||||
|
||||
// Apply middleware to your router
|
||||
@@ -18,6 +18,59 @@ router.Use(provider.Middleware)
|
||||
http.Handle("/metrics", provider.Handler())
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
You can customize the metrics provider using a configuration struct:
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/metrics"
|
||||
|
||||
// Create custom configuration
|
||||
config := &metrics.Config{
|
||||
Enabled: true,
|
||||
Provider: "prometheus",
|
||||
Namespace: "myapp", // Prefix all metrics with "myapp_"
|
||||
HTTPRequestBuckets: []float64{0.01, 0.05, 0.1, 0.5, 1, 2, 5},
|
||||
DBQueryBuckets: []float64{0.001, 0.01, 0.05, 0.1, 0.5, 1},
|
||||
}
|
||||
|
||||
// Initialize with custom config
|
||||
provider := metrics.NewPrometheusProvider(config)
|
||||
metrics.SetProvider(provider)
|
||||
```
|
||||
|
||||
### Configuration Options
|
||||
|
||||
| Field | Type | Default | Description |
|
||||
|-------|------|---------|-------------|
|
||||
| `Enabled` | `bool` | `true` | Enable/disable metrics collection |
|
||||
| `Provider` | `string` | `"prometheus"` | Metrics provider type |
|
||||
| `Namespace` | `string` | `""` | Prefix for all metric names |
|
||||
| `HTTPRequestBuckets` | `[]float64` | See below | Histogram buckets for HTTP duration (seconds) |
|
||||
| `DBQueryBuckets` | `[]float64` | See below | Histogram buckets for DB query duration (seconds) |
|
||||
|
||||
**Default HTTP Request Buckets:** `[0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10]`
|
||||
|
||||
**Default DB Query Buckets:** `[0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5]`
|
||||
|
||||
### Pushgateway Configuration (Optional)
|
||||
|
||||
For batch jobs, cron tasks, or short-lived processes, you can push metrics to Prometheus Pushgateway:
|
||||
|
||||
| Field | Type | Default | Description |
|
||||
|-------|------|---------|-------------|
|
||||
| `PushgatewayURL` | `string` | `""` | URL of Pushgateway (e.g., "http://pushgateway:9091") |
|
||||
| `PushgatewayJobName` | `string` | `"resolvespec"` | Job name for pushed metrics |
|
||||
| `PushgatewayInterval` | `int` | `0` | Auto-push interval in seconds (0 = disabled) |
|
||||
|
||||
```go
|
||||
config := &metrics.Config{
|
||||
PushgatewayURL: "http://pushgateway:9091",
|
||||
PushgatewayJobName: "batch-job",
|
||||
PushgatewayInterval: 30, // Push every 30 seconds
|
||||
}
|
||||
```
|
||||
|
||||
## Provider Interface
|
||||
|
||||
The package uses a provider interface, allowing you to plug in different metric systems:
|
||||
@@ -87,6 +140,13 @@ When using `PrometheusProvider`, the following metrics are available:
|
||||
| `cache_hits_total` | Counter | provider | Total cache hits |
|
||||
| `cache_misses_total` | Counter | provider | Total cache misses |
|
||||
| `cache_size_items` | Gauge | provider | Current cache size |
|
||||
| `events_published_total` | Counter | source, event_type | Total events published |
|
||||
| `events_processed_total` | Counter | source, event_type, status | Total events processed |
|
||||
| `event_processing_duration_seconds` | Histogram | source, event_type | Event processing duration |
|
||||
| `event_queue_size` | Gauge | - | Current event queue size |
|
||||
| `panics_total` | Counter | method | Total panics recovered |
|
||||
|
||||
**Note:** If a custom `Namespace` is configured, all metric names will be prefixed with `{namespace}_`.
|
||||
|
||||
## Prometheus Queries
|
||||
|
||||
@@ -146,8 +206,126 @@ func (c *CustomProvider) Handler() http.Handler {
|
||||
metrics.SetProvider(&CustomProvider{})
|
||||
```
|
||||
|
||||
## Pushgateway Usage
|
||||
|
||||
### Automatic Push (Batch Jobs)
|
||||
|
||||
For jobs that run periodically, use automatic pushing:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/metrics"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Configure with automatic pushing every 30 seconds
|
||||
config := &metrics.Config{
|
||||
Enabled: true,
|
||||
Provider: "prometheus",
|
||||
Namespace: "batch_job",
|
||||
PushgatewayURL: "http://pushgateway:9091",
|
||||
PushgatewayJobName: "data-processor",
|
||||
PushgatewayInterval: 30, // Push every 30 seconds
|
||||
}
|
||||
|
||||
provider := metrics.NewPrometheusProvider(config)
|
||||
metrics.SetProvider(provider)
|
||||
|
||||
// Ensure cleanup on exit
|
||||
defer provider.StopAutoPush()
|
||||
|
||||
// Your batch job logic here
|
||||
processBatchData()
|
||||
}
|
||||
```
|
||||
|
||||
### Manual Push (Short-lived Processes)
|
||||
|
||||
For one-time jobs or when you want manual control:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/metrics"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Configure without automatic pushing
|
||||
config := &metrics.Config{
|
||||
Enabled: true,
|
||||
Provider: "prometheus",
|
||||
PushgatewayURL: "http://pushgateway:9091",
|
||||
PushgatewayJobName: "migration-job",
|
||||
// PushgatewayInterval: 0 (default - no auto-push)
|
||||
}
|
||||
|
||||
provider := metrics.NewPrometheusProvider(config)
|
||||
metrics.SetProvider(provider)
|
||||
|
||||
// Run your job
|
||||
err := runMigration()
|
||||
|
||||
// Push metrics at the end
|
||||
if pushErr := provider.Push(); pushErr != nil {
|
||||
log.Printf("Failed to push metrics: %v", pushErr)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Docker Compose with Pushgateway
|
||||
|
||||
```yaml
|
||||
version: '3'
|
||||
services:
|
||||
batch-job:
|
||||
build: .
|
||||
environment:
|
||||
PUSHGATEWAY_URL: "http://pushgateway:9091"
|
||||
|
||||
pushgateway:
|
||||
image: prom/pushgateway
|
||||
ports:
|
||||
- "9091:9091"
|
||||
|
||||
prometheus:
|
||||
image: prom/prometheus
|
||||
ports:
|
||||
- "9090:9090"
|
||||
volumes:
|
||||
- ./prometheus.yml:/etc/prometheus/prometheus.yml
|
||||
command:
|
||||
- '--config.file=/etc/prometheus/prometheus.yml'
|
||||
```
|
||||
|
||||
**prometheus.yml for Pushgateway:**
|
||||
|
||||
```yaml
|
||||
global:
|
||||
scrape_interval: 15s
|
||||
|
||||
scrape_configs:
|
||||
# Scrape the pushgateway
|
||||
- job_name: 'pushgateway'
|
||||
honor_labels: true # Important: preserve job labels from pushed metrics
|
||||
static_configs:
|
||||
- targets: ['pushgateway:9091']
|
||||
```
|
||||
|
||||
## Complete Example
|
||||
|
||||
### Basic Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
@@ -162,8 +340,8 @@ import (
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Initialize metrics
|
||||
provider := metrics.NewPrometheusProvider()
|
||||
// Initialize metrics with default config
|
||||
provider := metrics.NewPrometheusProvider(nil)
|
||||
metrics.SetProvider(provider)
|
||||
|
||||
// Create router
|
||||
@@ -198,6 +376,42 @@ func getUsersHandler(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
```
|
||||
|
||||
### With Custom Configuration
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/metrics"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Custom metrics configuration
|
||||
metricsConfig := &metrics.Config{
|
||||
Enabled: true,
|
||||
Provider: "prometheus",
|
||||
Namespace: "myapp",
|
||||
// Custom buckets optimized for your application
|
||||
HTTPRequestBuckets: []float64{0.01, 0.05, 0.1, 0.5, 1, 2, 5, 10},
|
||||
DBQueryBuckets: []float64{0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1},
|
||||
}
|
||||
|
||||
// Initialize with custom config
|
||||
provider := metrics.NewPrometheusProvider(metricsConfig)
|
||||
metrics.SetProvider(provider)
|
||||
|
||||
router := mux.NewRouter()
|
||||
router.Use(provider.Middleware)
|
||||
router.Handle("/metrics", provider.Handler())
|
||||
|
||||
log.Fatal(http.ListenAndServe(":8080", router))
|
||||
}
|
||||
```
|
||||
|
||||
## Docker Compose Example
|
||||
|
||||
```yaml
|
||||
@@ -257,3 +471,8 @@ scrape_configs:
|
||||
4. **Performance**: Metrics collection is lock-free and highly performant
|
||||
- Safe for high-throughput applications
|
||||
- Minimal overhead (<1% in most cases)
|
||||
|
||||
5. **Pull vs Push**:
|
||||
- **Use Pull (default)**: Long-running services, web servers, microservices
|
||||
- **Use Push (Pushgateway)**: Batch jobs, cron tasks, short-lived processes, serverless functions
|
||||
- Pull is preferred for most applications as it allows Prometheus to detect if your service is down
|
||||
|
||||
64
pkg/metrics/config.go
Normal file
64
pkg/metrics/config.go
Normal file
@@ -0,0 +1,64 @@
|
||||
package metrics
|
||||
|
||||
// Config holds configuration for the metrics provider
|
||||
type Config struct {
|
||||
// Enabled determines whether metrics collection is enabled
|
||||
Enabled bool `mapstructure:"enabled"`
|
||||
|
||||
// Provider specifies which metrics provider to use (prometheus, noop)
|
||||
Provider string `mapstructure:"provider"`
|
||||
|
||||
// Namespace is an optional prefix for all metric names
|
||||
Namespace string `mapstructure:"namespace"`
|
||||
|
||||
// HTTPRequestBuckets defines histogram buckets for HTTP request duration (in seconds)
|
||||
// Default: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10]
|
||||
HTTPRequestBuckets []float64 `mapstructure:"http_request_buckets"`
|
||||
|
||||
// DBQueryBuckets defines histogram buckets for database query duration (in seconds)
|
||||
// Default: [0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5]
|
||||
DBQueryBuckets []float64 `mapstructure:"db_query_buckets"`
|
||||
|
||||
// PushgatewayURL is the URL of the Prometheus Pushgateway (optional)
|
||||
// If set, metrics will be pushed to this gateway instead of only being scraped
|
||||
// Example: "http://pushgateway:9091"
|
||||
PushgatewayURL string `mapstructure:"pushgateway_url"`
|
||||
|
||||
// PushgatewayJobName is the job name to use when pushing metrics to Pushgateway
|
||||
// Default: "resolvespec"
|
||||
PushgatewayJobName string `mapstructure:"pushgateway_job_name"`
|
||||
|
||||
// PushgatewayInterval is the interval at which to push metrics to Pushgateway
|
||||
// Only used if PushgatewayURL is set. If 0, automatic pushing is disabled.
|
||||
// Default: 0 (no automatic pushing)
|
||||
PushgatewayInterval int `mapstructure:"pushgateway_interval"`
|
||||
}
|
||||
|
||||
// DefaultConfig returns a Config with sensible defaults
|
||||
func DefaultConfig() *Config {
|
||||
return &Config{
|
||||
Enabled: true,
|
||||
Provider: "prometheus",
|
||||
// HTTP requests typically take longer than DB queries
|
||||
HTTPRequestBuckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
|
||||
// DB queries are usually faster
|
||||
DBQueryBuckets: []float64{0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5},
|
||||
}
|
||||
}
|
||||
|
||||
// ApplyDefaults fills in any missing values with defaults
|
||||
func (c *Config) ApplyDefaults() {
|
||||
if c.Provider == "" {
|
||||
c.Provider = "prometheus"
|
||||
}
|
||||
if len(c.HTTPRequestBuckets) == 0 {
|
||||
c.HTTPRequestBuckets = []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10}
|
||||
}
|
||||
if len(c.DBQueryBuckets) == 0 {
|
||||
c.DBQueryBuckets = []float64{0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5}
|
||||
}
|
||||
// Set default job name if pushgateway is configured but job name is empty
|
||||
if c.PushgatewayURL != "" && c.PushgatewayJobName == "" {
|
||||
c.PushgatewayJobName = "resolvespec"
|
||||
}
|
||||
}
|
||||
64
pkg/metrics/example_test.go
Normal file
64
pkg/metrics/example_test.go
Normal file
@@ -0,0 +1,64 @@
|
||||
package metrics_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/metrics"
|
||||
)
|
||||
|
||||
// ExampleNewPrometheusProvider_default demonstrates using default configuration
|
||||
func ExampleNewPrometheusProvider_default() {
|
||||
// Initialize with default configuration
|
||||
provider := metrics.NewPrometheusProvider(nil)
|
||||
metrics.SetProvider(provider)
|
||||
|
||||
fmt.Println("Provider initialized with defaults")
|
||||
// Output: Provider initialized with defaults
|
||||
}
|
||||
|
||||
// ExampleNewPrometheusProvider_custom demonstrates using custom configuration
|
||||
func ExampleNewPrometheusProvider_custom() {
|
||||
// Create custom configuration
|
||||
config := &metrics.Config{
|
||||
Enabled: true,
|
||||
Provider: "prometheus",
|
||||
Namespace: "myapp",
|
||||
HTTPRequestBuckets: []float64{0.01, 0.05, 0.1, 0.5, 1, 2, 5},
|
||||
DBQueryBuckets: []float64{0.001, 0.01, 0.05, 0.1, 0.5, 1},
|
||||
}
|
||||
|
||||
// Initialize with custom configuration
|
||||
provider := metrics.NewPrometheusProvider(config)
|
||||
metrics.SetProvider(provider)
|
||||
|
||||
fmt.Println("Provider initialized with custom config")
|
||||
// Output: Provider initialized with custom config
|
||||
}
|
||||
|
||||
// ExampleDefaultConfig demonstrates getting default configuration
|
||||
func ExampleDefaultConfig() {
|
||||
config := metrics.DefaultConfig()
|
||||
fmt.Printf("Default provider: %s\n", config.Provider)
|
||||
fmt.Printf("Default enabled: %v\n", config.Enabled)
|
||||
// Output:
|
||||
// Default provider: prometheus
|
||||
// Default enabled: true
|
||||
}
|
||||
|
||||
// ExampleConfig_ApplyDefaults demonstrates applying defaults to partial config
|
||||
func ExampleConfig_ApplyDefaults() {
|
||||
// Create partial configuration
|
||||
config := &metrics.Config{
|
||||
Namespace: "myapp",
|
||||
// Other fields will be filled with defaults
|
||||
}
|
||||
|
||||
// Apply defaults
|
||||
config.ApplyDefaults()
|
||||
|
||||
fmt.Printf("Provider: %s\n", config.Provider)
|
||||
fmt.Printf("Namespace: %s\n", config.Namespace)
|
||||
// Output:
|
||||
// Provider: prometheus
|
||||
// Namespace: myapp
|
||||
}
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/prometheus/client_golang/prometheus/push"
|
||||
)
|
||||
|
||||
// PrometheusProvider implements the Provider interface using Prometheus
|
||||
@@ -20,23 +21,51 @@ type PrometheusProvider struct {
|
||||
cacheHits *prometheus.CounterVec
|
||||
cacheMisses *prometheus.CounterVec
|
||||
cacheSize *prometheus.GaugeVec
|
||||
eventPublished *prometheus.CounterVec
|
||||
eventProcessed *prometheus.CounterVec
|
||||
eventDuration *prometheus.HistogramVec
|
||||
eventQueueSize prometheus.Gauge
|
||||
panicsTotal *prometheus.CounterVec
|
||||
|
||||
// Pushgateway fields (optional)
|
||||
pushgatewayURL string
|
||||
pushgatewayJobName string
|
||||
pusher *push.Pusher
|
||||
pushTicker *time.Ticker
|
||||
pushStop chan bool
|
||||
}
|
||||
|
||||
// NewPrometheusProvider creates a new Prometheus metrics provider
|
||||
func NewPrometheusProvider() *PrometheusProvider {
|
||||
return &PrometheusProvider{
|
||||
// If cfg is nil, default configuration will be used
|
||||
func NewPrometheusProvider(cfg *Config) *PrometheusProvider {
|
||||
// Use default config if none provided
|
||||
if cfg == nil {
|
||||
cfg = DefaultConfig()
|
||||
} else {
|
||||
// Apply defaults for any missing values
|
||||
cfg.ApplyDefaults()
|
||||
}
|
||||
|
||||
// Helper to add namespace prefix if configured
|
||||
metricName := func(name string) string {
|
||||
if cfg.Namespace != "" {
|
||||
return cfg.Namespace + "_" + name
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
p := &PrometheusProvider{
|
||||
requestDuration: promauto.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "http_request_duration_seconds",
|
||||
Name: metricName("http_request_duration_seconds"),
|
||||
Help: "HTTP request duration in seconds",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
Buckets: cfg.HTTPRequestBuckets,
|
||||
},
|
||||
[]string{"method", "path", "status"},
|
||||
),
|
||||
requestTotal: promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "http_requests_total",
|
||||
Name: metricName("http_requests_total"),
|
||||
Help: "Total number of HTTP requests",
|
||||
},
|
||||
[]string{"method", "path", "status"},
|
||||
@@ -44,54 +73,100 @@ func NewPrometheusProvider() *PrometheusProvider {
|
||||
|
||||
requestsInFlight: promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "http_requests_in_flight",
|
||||
Name: metricName("http_requests_in_flight"),
|
||||
Help: "Current number of HTTP requests being processed",
|
||||
},
|
||||
),
|
||||
dbQueryDuration: promauto.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "db_query_duration_seconds",
|
||||
Name: metricName("db_query_duration_seconds"),
|
||||
Help: "Database query duration in seconds",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
Buckets: cfg.DBQueryBuckets,
|
||||
},
|
||||
[]string{"operation", "table"},
|
||||
),
|
||||
dbQueryTotal: promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "db_queries_total",
|
||||
Name: metricName("db_queries_total"),
|
||||
Help: "Total number of database queries",
|
||||
},
|
||||
[]string{"operation", "table", "status"},
|
||||
),
|
||||
cacheHits: promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "cache_hits_total",
|
||||
Name: metricName("cache_hits_total"),
|
||||
Help: "Total number of cache hits",
|
||||
},
|
||||
[]string{"provider"},
|
||||
),
|
||||
cacheMisses: promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "cache_misses_total",
|
||||
Name: metricName("cache_misses_total"),
|
||||
Help: "Total number of cache misses",
|
||||
},
|
||||
[]string{"provider"},
|
||||
),
|
||||
cacheSize: promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "cache_size_items",
|
||||
Name: metricName("cache_size_items"),
|
||||
Help: "Number of items in cache",
|
||||
},
|
||||
[]string{"provider"},
|
||||
),
|
||||
eventPublished: promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: metricName("events_published_total"),
|
||||
Help: "Total number of events published",
|
||||
},
|
||||
[]string{"source", "event_type"},
|
||||
),
|
||||
eventProcessed: promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: metricName("events_processed_total"),
|
||||
Help: "Total number of events processed",
|
||||
},
|
||||
[]string{"source", "event_type", "status"},
|
||||
),
|
||||
eventDuration: promauto.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: metricName("event_processing_duration_seconds"),
|
||||
Help: "Event processing duration in seconds",
|
||||
Buckets: cfg.DBQueryBuckets, // Events are typically fast like DB queries
|
||||
},
|
||||
[]string{"source", "event_type"},
|
||||
),
|
||||
eventQueueSize: promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: metricName("event_queue_size"),
|
||||
Help: "Current number of events in queue",
|
||||
},
|
||||
),
|
||||
panicsTotal: promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "panics_total",
|
||||
Name: metricName("panics_total"),
|
||||
Help: "Total number of panics",
|
||||
},
|
||||
[]string{"method"},
|
||||
),
|
||||
|
||||
pushgatewayURL: cfg.PushgatewayURL,
|
||||
pushgatewayJobName: cfg.PushgatewayJobName,
|
||||
}
|
||||
|
||||
// Initialize pushgateway if configured
|
||||
if cfg.PushgatewayURL != "" {
|
||||
p.pusher = push.New(cfg.PushgatewayURL, cfg.PushgatewayJobName).
|
||||
Gatherer(prometheus.DefaultGatherer)
|
||||
|
||||
// Start automatic pushing if interval is configured
|
||||
if cfg.PushgatewayInterval > 0 {
|
||||
p.pushStop = make(chan bool)
|
||||
p.pushTicker = time.NewTicker(time.Duration(cfg.PushgatewayInterval) * time.Second)
|
||||
go p.startAutoPush()
|
||||
}
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// ResponseWriter wraps http.ResponseWriter to capture status code
|
||||
@@ -153,6 +228,22 @@ func (p *PrometheusProvider) UpdateCacheSize(provider string, size int64) {
|
||||
p.cacheSize.WithLabelValues(provider).Set(float64(size))
|
||||
}
|
||||
|
||||
// RecordEventPublished implements Provider interface
|
||||
func (p *PrometheusProvider) RecordEventPublished(source, eventType string) {
|
||||
p.eventPublished.WithLabelValues(source, eventType).Inc()
|
||||
}
|
||||
|
||||
// RecordEventProcessed implements Provider interface
|
||||
func (p *PrometheusProvider) RecordEventProcessed(source, eventType, status string, duration time.Duration) {
|
||||
p.eventProcessed.WithLabelValues(source, eventType, status).Inc()
|
||||
p.eventDuration.WithLabelValues(source, eventType).Observe(duration.Seconds())
|
||||
}
|
||||
|
||||
// UpdateEventQueueSize implements Provider interface
|
||||
func (p *PrometheusProvider) UpdateEventQueueSize(size int64) {
|
||||
p.eventQueueSize.Set(float64(size))
|
||||
}
|
||||
|
||||
// RecordPanic implements the Provider interface
|
||||
func (p *PrometheusProvider) RecordPanic(methodName string) {
|
||||
p.panicsTotal.WithLabelValues(methodName).Inc()
|
||||
@@ -185,3 +276,37 @@ func (p *PrometheusProvider) Middleware(next http.Handler) http.Handler {
|
||||
p.RecordHTTPRequest(r.Method, r.URL.Path, status, duration)
|
||||
})
|
||||
}
|
||||
|
||||
// Push manually pushes metrics to the configured Pushgateway
|
||||
// Returns an error if pushing fails or if Pushgateway is not configured
|
||||
func (p *PrometheusProvider) Push() error {
|
||||
if p.pusher == nil {
|
||||
return nil // Pushgateway not configured, silently skip
|
||||
}
|
||||
return p.pusher.Push()
|
||||
}
|
||||
|
||||
// startAutoPush runs in a goroutine and periodically pushes metrics to Pushgateway
|
||||
func (p *PrometheusProvider) startAutoPush() {
|
||||
for {
|
||||
select {
|
||||
case <-p.pushTicker.C:
|
||||
if err := p.Push(); err != nil {
|
||||
// Log error but continue pushing
|
||||
// Note: In production, you might want to use a proper logger
|
||||
_ = err
|
||||
}
|
||||
case <-p.pushStop:
|
||||
p.pushTicker.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StopAutoPush stops the automatic push goroutine
|
||||
// This should be called when shutting down the application
|
||||
func (p *PrometheusProvider) StopAutoPush() {
|
||||
if p.pushStop != nil {
|
||||
close(p.pushStop)
|
||||
}
|
||||
}
|
||||
|
||||
703
pkg/resolvespec/README.md
Normal file
703
pkg/resolvespec/README.md
Normal file
@@ -0,0 +1,703 @@
|
||||
# ResolveSpec - Body-Based REST API
|
||||
|
||||
ResolveSpec provides a REST API where query options are passed in the JSON request body. This approach offers GraphQL-like flexibility while maintaining RESTful principles, making it ideal for complex queries and operations.
|
||||
|
||||
## Features
|
||||
|
||||
* **Body-Based Querying**: All query options passed via JSON request body
|
||||
* **Lifecycle Hooks**: Before/after hooks for create, read, update, delete operations
|
||||
* **Cursor Pagination**: Efficient cursor-based pagination with complex sorting
|
||||
* **Offset Pagination**: Traditional limit/offset pagination support
|
||||
* **Advanced Filtering**: Multiple operators, AND/OR logic, and custom SQL
|
||||
* **Relationship Preloading**: Load related entities with custom column selection and filters
|
||||
* **Recursive CRUD**: Automatically handle nested object graphs with foreign key resolution
|
||||
* **Computed Columns**: Define virtual columns with SQL expressions
|
||||
* **Database-Agnostic**: Works with GORM, Bun, or custom database adapters
|
||||
* **Router-Agnostic**: Integrates with any HTTP router through standard interfaces
|
||||
* **Type-Safe**: Strong type validation and conversion
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Setup with GORM
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/resolvespec"
|
||||
import "github.com/gorilla/mux"
|
||||
|
||||
// Create handler
|
||||
handler := resolvespec.NewHandlerWithGORM(db)
|
||||
|
||||
// IMPORTANT: Register models BEFORE setting up routes
|
||||
handler.registry.RegisterModel("core.users", &User{})
|
||||
handler.registry.RegisterModel("core.posts", &Post{})
|
||||
|
||||
// Setup routes
|
||||
router := mux.NewRouter()
|
||||
resolvespec.SetupMuxRoutes(router, handler, nil)
|
||||
|
||||
// Start server
|
||||
http.ListenAndServe(":8080", router)
|
||||
```
|
||||
|
||||
### Setup with Bun ORM
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/resolvespec"
|
||||
import "github.com/uptrace/bun"
|
||||
|
||||
// Create handler with Bun
|
||||
handler := resolvespec.NewHandlerWithBun(bunDB)
|
||||
|
||||
// Register models
|
||||
handler.registry.RegisterModel("core.users", &User{})
|
||||
|
||||
// Setup routes (same as GORM)
|
||||
router := mux.NewRouter()
|
||||
resolvespec.SetupMuxRoutes(router, handler, nil)
|
||||
```
|
||||
|
||||
## Basic Usage
|
||||
|
||||
### Simple Read Request
|
||||
|
||||
```http
|
||||
POST /core/users HTTP/1.1
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
### With Preloading
|
||||
|
||||
```http
|
||||
POST /core/users HTTP/1.1
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
## Request Structure
|
||||
|
||||
### Request Format
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read|create|update|delete",
|
||||
"data": {
|
||||
// For create/update operations
|
||||
},
|
||||
"options": {
|
||||
"columns": [...],
|
||||
"preload": [...],
|
||||
"filters": [...],
|
||||
"sort": [...],
|
||||
"limit": number,
|
||||
"offset": number,
|
||||
"cursor_forward": "string",
|
||||
"cursor_backward": "string",
|
||||
"customOperators": [...],
|
||||
"computedColumns": [...]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Operations
|
||||
|
||||
| Operation | Description | Requires Data | Requires ID |
|
||||
|-----------|-------------|---------------|-------------|
|
||||
| `read` | Fetch records | No | Optional (single record) |
|
||||
| `create` | Create new record(s) | Yes | No |
|
||||
| `update` | Update existing record(s) | Yes | Yes (in URL) |
|
||||
| `delete` | Delete record(s) | No | Yes (in URL) |
|
||||
|
||||
### Options Fields
|
||||
|
||||
| Field | Type | Description | Example |
|
||||
|-------|------|-------------|---------|
|
||||
| `columns` | `[]string` | Columns to select | `["id", "name", "email"]` |
|
||||
| `preload` | `[]PreloadConfig` | Relations to load | See [Preloading](#preloading) |
|
||||
| `filters` | `[]Filter` | Filter conditions | See [Filtering](#filtering) |
|
||||
| `sort` | `[]Sort` | Sort criteria | `[{"column": "created_at", "direction": "desc"}]` |
|
||||
| `limit` | `int` | Max records to return | `50` |
|
||||
| `offset` | `int` | Number of records to skip | `100` |
|
||||
| `cursor_forward` | `string` | Cursor for next page | `"12345"` |
|
||||
| `cursor_backward` | `string` | Cursor for previous page | `"12300"` |
|
||||
| `customOperators` | `[]CustomOperator` | Custom SQL conditions | See [Custom Operators](#custom-operators) |
|
||||
| `computedColumns` | `[]ComputedColumn` | Virtual columns | See [Computed Columns](#computed-columns) |
|
||||
|
||||
## Filtering
|
||||
|
||||
### Available Operators
|
||||
|
||||
| Operator | Description | Example |
|
||||
|----------|-------------|---------|
|
||||
| `eq` | Equal | `{"column": "status", "operator": "eq", "value": "active"}` |
|
||||
| `neq` | Not Equal | `{"column": "status", "operator": "neq", "value": "deleted"}` |
|
||||
| `gt` | Greater Than | `{"column": "age", "operator": "gt", "value": 18}` |
|
||||
| `gte` | Greater Than or Equal | `{"column": "age", "operator": "gte", "value": 18}` |
|
||||
| `lt` | Less Than | `{"column": "price", "operator": "lt", "value": 100}` |
|
||||
| `lte` | Less Than or Equal | `{"column": "price", "operator": "lte", "value": 100}` |
|
||||
| `like` | LIKE pattern | `{"column": "name", "operator": "like", "value": "%john%"}` |
|
||||
| `ilike` | Case-insensitive LIKE | `{"column": "email", "operator": "ilike", "value": "%@example.com"}` |
|
||||
| `in` | IN clause | `{"column": "status", "operator": "in", "value": ["active", "pending"]}` |
|
||||
| `contains` | Contains string | `{"column": "description", "operator": "contains", "value": "important"}` |
|
||||
| `startswith` | Starts with string | `{"column": "name", "operator": "startswith", "value": "John"}` |
|
||||
| `endswith` | Ends with string | `{"column": "email", "operator": "endswith", "value": "@example.com"}` |
|
||||
| `between` | Between (exclusive) | `{"column": "age", "operator": "between", "value": [18, 65]}` |
|
||||
| `betweeninclusive` | Between (inclusive) | `{"column": "price", "operator": "betweeninclusive", "value": [10, 100]}` |
|
||||
| `empty` | IS NULL or empty | `{"column": "deleted_at", "operator": "empty"}` |
|
||||
| `notempty` | IS NOT NULL | `{"column": "email", "operator": "notempty"}` |
|
||||
|
||||
### Complex Filtering Example
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"filters": [
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "active"
|
||||
},
|
||||
{
|
||||
"column": "age",
|
||||
"operator": "gte",
|
||||
"value": 18
|
||||
},
|
||||
{
|
||||
"column": "email",
|
||||
"operator": "ilike",
|
||||
"value": "%@company.com"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Preloading
|
||||
|
||||
Load related entities with custom configuration:
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"columns": ["id", "name", "email"],
|
||||
"preload": [
|
||||
{
|
||||
"relation": "posts",
|
||||
"columns": ["id", "title", "created_at"],
|
||||
"filters": [
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "published"
|
||||
}
|
||||
],
|
||||
"sort": [
|
||||
{
|
||||
"column": "created_at",
|
||||
"direction": "desc"
|
||||
}
|
||||
],
|
||||
"limit": 5
|
||||
},
|
||||
{
|
||||
"relation": "profile",
|
||||
"columns": ["bio", "website"]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Cursor Pagination
|
||||
|
||||
Efficient pagination for large datasets:
|
||||
|
||||
### First Request (No Cursor)
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"sort": [
|
||||
{
|
||||
"column": "created_at",
|
||||
"direction": "desc"
|
||||
},
|
||||
{
|
||||
"column": "id",
|
||||
"direction": "asc"
|
||||
}
|
||||
],
|
||||
"limit": 50
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Next Page (Forward Cursor)
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"sort": [
|
||||
{
|
||||
"column": "created_at",
|
||||
"direction": "desc"
|
||||
},
|
||||
{
|
||||
"column": "id",
|
||||
"direction": "asc"
|
||||
}
|
||||
],
|
||||
"limit": 50,
|
||||
"cursor_forward": "12345"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Previous Page (Backward Cursor)
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"sort": [
|
||||
{
|
||||
"column": "created_at",
|
||||
"direction": "desc"
|
||||
},
|
||||
{
|
||||
"column": "id",
|
||||
"direction": "asc"
|
||||
}
|
||||
],
|
||||
"limit": 50,
|
||||
"cursor_backward": "12300"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Benefits over offset pagination**:
|
||||
* Consistent results when data changes
|
||||
* Better performance for large offsets
|
||||
* Prevents "skipped" or duplicate records
|
||||
* Works with complex sort expressions
|
||||
|
||||
## Recursive CRUD Operations
|
||||
|
||||
Automatically handle nested object graphs with intelligent foreign key resolution.
|
||||
|
||||
### Creating Nested Objects
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "create",
|
||||
"data": {
|
||||
"name": "John Doe",
|
||||
"email": "john@example.com",
|
||||
"posts": [
|
||||
{
|
||||
"title": "My First Post",
|
||||
"content": "Hello World",
|
||||
"tags": [
|
||||
{"name": "tech"},
|
||||
{"name": "programming"}
|
||||
]
|
||||
},
|
||||
{
|
||||
"title": "Second Post",
|
||||
"content": "More content"
|
||||
}
|
||||
],
|
||||
"profile": {
|
||||
"bio": "Software Developer",
|
||||
"website": "https://example.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Per-Record Operation Control with `_request`
|
||||
|
||||
Control individual operations for each nested record:
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "update",
|
||||
"data": {
|
||||
"name": "John Updated",
|
||||
"posts": [
|
||||
{
|
||||
"_request": "insert",
|
||||
"title": "New Post",
|
||||
"content": "Fresh content"
|
||||
},
|
||||
{
|
||||
"_request": "update",
|
||||
"id": 456,
|
||||
"title": "Updated Post Title"
|
||||
},
|
||||
{
|
||||
"_request": "delete",
|
||||
"id": 789
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Supported `_request` values**:
|
||||
* `insert` - Create a new related record
|
||||
* `update` - Update an existing related record
|
||||
* `delete` - Delete a related record
|
||||
* `upsert` - Create if doesn't exist, update if exists
|
||||
|
||||
**How It Works**:
|
||||
1. Automatic foreign key resolution - parent IDs propagate to children
|
||||
2. Recursive processing - handles nested relationships at any depth
|
||||
3. Transaction safety - all operations execute atomically
|
||||
4. Relationship detection - automatically detects belongsTo, hasMany, hasOne, many2many
|
||||
5. Flexible operations - mix create, update, and delete in one request
|
||||
|
||||
## Computed Columns
|
||||
|
||||
Define virtual columns using SQL expressions:
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"columns": ["id", "first_name", "last_name"],
|
||||
"computedColumns": [
|
||||
{
|
||||
"name": "full_name",
|
||||
"expression": "CONCAT(first_name, ' ', last_name)"
|
||||
},
|
||||
{
|
||||
"name": "age_years",
|
||||
"expression": "EXTRACT(YEAR FROM AGE(birth_date))"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Custom Operators
|
||||
|
||||
Add custom SQL conditions when needed:
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"customOperators": [
|
||||
{
|
||||
"condition": "LOWER(email) LIKE ?",
|
||||
"values": ["%@example.com"]
|
||||
},
|
||||
{
|
||||
"condition": "created_at > NOW() - INTERVAL '7 days'"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Lifecycle Hooks
|
||||
|
||||
Register hooks for all CRUD operations:
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/resolvespec"
|
||||
|
||||
// Create handler
|
||||
handler := resolvespec.NewHandlerWithGORM(db)
|
||||
|
||||
// Register a before-read hook (e.g., for authorization)
|
||||
handler.Hooks().Register(resolvespec.BeforeRead, func(ctx *resolvespec.HookContext) error {
|
||||
// Check permissions
|
||||
if !userHasPermission(ctx.Context, ctx.Entity) {
|
||||
return fmt.Errorf("unauthorized access to %s", ctx.Entity)
|
||||
}
|
||||
|
||||
// Modify query options
|
||||
if ctx.Options.Limit == nil || *ctx.Options.Limit > 100 {
|
||||
ctx.Options.Limit = ptr(100) // Enforce max limit
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
// Register an after-read hook (e.g., for data transformation)
|
||||
handler.Hooks().Register(resolvespec.AfterRead, func(ctx *resolvespec.HookContext) error {
|
||||
// Transform or filter results
|
||||
if users, ok := ctx.Result.([]User); ok {
|
||||
for i := range users {
|
||||
users[i].Email = maskEmail(users[i].Email)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// Register a before-create hook (e.g., for validation)
|
||||
handler.Hooks().Register(resolvespec.BeforeCreate, func(ctx *resolvespec.HookContext) error {
|
||||
// Validate data
|
||||
if user, ok := ctx.Data.(*User); ok {
|
||||
if user.Email == "" {
|
||||
return fmt.Errorf("email is required")
|
||||
}
|
||||
// Add timestamps
|
||||
user.CreatedAt = time.Now()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
**Available Hook Types**:
|
||||
* `BeforeRead`, `AfterRead`
|
||||
* `BeforeCreate`, `AfterCreate`
|
||||
* `BeforeUpdate`, `AfterUpdate`
|
||||
* `BeforeDelete`, `AfterDelete`
|
||||
|
||||
**HookContext** provides:
|
||||
* `Context`: Request context
|
||||
* `Handler`: Access to handler, database, and registry
|
||||
* `Schema`, `Entity`, `TableName`: Request info
|
||||
* `Model`: The registered model type
|
||||
* `Options`: Parsed request options (filters, sorting, etc.)
|
||||
* `ID`: Record ID (for single-record operations)
|
||||
* `Data`: Request data (for create/update)
|
||||
* `Result`: Operation result (for after hooks)
|
||||
* `Writer`: Response writer (allows hooks to modify response)
|
||||
|
||||
## Model Registration
|
||||
|
||||
```go
|
||||
type User struct {
|
||||
ID uint `json:"id" gorm:"primaryKey"`
|
||||
Name string `json:"name"`
|
||||
Email string `json:"email"`
|
||||
Status string `json:"status"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
Posts []Post `json:"posts,omitempty" gorm:"foreignKey:UserID"`
|
||||
Profile *Profile `json:"profile,omitempty" gorm:"foreignKey:UserID"`
|
||||
}
|
||||
|
||||
type Post struct {
|
||||
ID uint `json:"id" gorm:"primaryKey"`
|
||||
UserID uint `json:"user_id"`
|
||||
Title string `json:"title"`
|
||||
Content string `json:"content"`
|
||||
Status string `json:"status"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
Tags []Tag `json:"tags,omitempty" gorm:"many2many:post_tags"`
|
||||
}
|
||||
|
||||
// Schema.Table format
|
||||
handler.registry.RegisterModel("core.users", &User{})
|
||||
handler.registry.RegisterModel("core.posts", &Post{})
|
||||
```
|
||||
|
||||
## Complete Example
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/resolvespec"
|
||||
"github.com/gorilla/mux"
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
type User struct {
|
||||
ID uint `json:"id" gorm:"primaryKey"`
|
||||
Name string `json:"name"`
|
||||
Email string `json:"email"`
|
||||
Status string `json:"status"`
|
||||
Posts []Post `json:"posts,omitempty" gorm:"foreignKey:UserID"`
|
||||
}
|
||||
|
||||
type Post struct {
|
||||
ID uint `json:"id" gorm:"primaryKey"`
|
||||
UserID uint `json:"user_id"`
|
||||
Title string `json:"title"`
|
||||
Content string `json:"content"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Connect to database
|
||||
db, err := gorm.Open(postgres.Open("your-connection-string"), &gorm.Config{})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Create handler
|
||||
handler := resolvespec.NewHandlerWithGORM(db)
|
||||
|
||||
// Register models
|
||||
handler.registry.RegisterModel("core.users", &User{})
|
||||
handler.registry.RegisterModel("core.posts", &Post{})
|
||||
|
||||
// Add hooks
|
||||
handler.Hooks().Register(resolvespec.BeforeRead, func(ctx *resolvespec.HookContext) error {
|
||||
log.Printf("Reading %s", ctx.Entity)
|
||||
return nil
|
||||
})
|
||||
|
||||
// Setup routes
|
||||
router := mux.NewRouter()
|
||||
resolvespec.SetupMuxRoutes(router, handler, nil)
|
||||
|
||||
// Start server
|
||||
log.Println("Server starting on :8080")
|
||||
log.Fatal(http.ListenAndServe(":8080", router))
|
||||
}
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
ResolveSpec is designed for testability:
|
||||
|
||||
```go
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestUserRead(t *testing.T) {
|
||||
handler := resolvespec.NewHandlerWithGORM(testDB)
|
||||
handler.registry.RegisterModel("core.users", &User{})
|
||||
|
||||
reqBody := map[string]interface{}{
|
||||
"operation": "read",
|
||||
"options": map[string]interface{}{
|
||||
"columns": []string{"id", "name"},
|
||||
"limit": 10,
|
||||
},
|
||||
}
|
||||
|
||||
body, _ := json.Marshal(reqBody)
|
||||
req := httptest.NewRequest("POST", "/core/users", bytes.NewReader(body))
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
// Test your handler...
|
||||
}
|
||||
```
|
||||
|
||||
## Router Integration
|
||||
|
||||
### Gorilla Mux
|
||||
|
||||
```go
|
||||
router := mux.NewRouter()
|
||||
resolvespec.SetupMuxRoutes(router, handler, nil)
|
||||
```
|
||||
|
||||
### BunRouter
|
||||
|
||||
```go
|
||||
router := bunrouter.New()
|
||||
resolvespec.SetupBunRouterWithResolveSpec(router, handler)
|
||||
```
|
||||
|
||||
### Custom Routers
|
||||
|
||||
```go
|
||||
// Implement custom integration using common.Request and common.ResponseWriter
|
||||
router.POST("/:schema/:entity", func(w http.ResponseWriter, r *http.Request) {
|
||||
params := extractParams(r) // Your param extraction logic
|
||||
reqAdapter := router.NewHTTPRequest(r)
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
handler.Handle(respAdapter, reqAdapter, params)
|
||||
})
|
||||
```
|
||||
|
||||
## Response Format
|
||||
|
||||
### Success Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": [...],
|
||||
"metadata": {
|
||||
"total": 100,
|
||||
"filtered": 50,
|
||||
"limit": 10,
|
||||
"offset": 0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"error": {
|
||||
"code": "validation_error",
|
||||
"message": "Invalid request",
|
||||
"details": "..."
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
* [Main README](../../README.md) - ResolveSpec overview
|
||||
* [RestHeadSpec Package](../restheadspec/README.md) - Header-based API
|
||||
* [StaticWeb Package](../server/staticweb/README.md) - Static file server
|
||||
|
||||
## License
|
||||
|
||||
This package is part of ResolveSpec and is licensed under the MIT License.
|
||||
```
|
||||
|
||||
## Response Format
|
||||
|
||||
### Success Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": [...],
|
||||
"metadata": {
|
||||
"total": 100,
|
||||
"filtered": 50,
|
||||
"limit": 10,
|
||||
"offset": 0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"error": {
|
||||
"code": "validation_error",
|
||||
"message": "Invalid request",
|
||||
"details": "..."
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
* [Main README](../../README.md) - ResolveSpec overview
|
||||
* [RestHeadSpec Package](../restheadspec/README.md) - Header-based API
|
||||
* [StaticWeb Package](../server/staticweb/README.md) - Static file server
|
||||
|
||||
## License
|
||||
|
||||
This package is part of ResolveSpec and is licensed under the MIT License.
|
||||
445
pkg/restheadspec/README.md
Normal file
445
pkg/restheadspec/README.md
Normal file
@@ -0,0 +1,445 @@
|
||||
# RestHeadSpec - Header-Based REST API
|
||||
|
||||
RestHeadSpec provides a REST API where all query options are passed via HTTP headers instead of the request body. This provides cleaner separation between data and metadata, making it ideal for GET requests and RESTful architectures.
|
||||
|
||||
## Features
|
||||
|
||||
* **Header-Based Querying**: All query options via HTTP headers
|
||||
* **Lifecycle Hooks**: Before/after hooks for create, read, update, delete operations
|
||||
* **Cursor Pagination**: Efficient cursor-based pagination with complex sorting
|
||||
* **Advanced Filtering**: Field filters, search operators, AND/OR logic
|
||||
* **Multiple Response Formats**: Simple, detailed, and Syncfusion-compatible responses
|
||||
* **Single Record as Object**: Automatically return single-element arrays as objects (default)
|
||||
* **Base64 Support**: Base64-encoded header values for complex queries
|
||||
* **Type-Aware Filtering**: Automatic type detection and conversion
|
||||
* **CORS Support**: Comprehensive CORS headers for cross-origin requests
|
||||
* **OPTIONS Method**: Full OPTIONS support for CORS preflight
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Setup with GORM
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/restheadspec"
|
||||
import "github.com/gorilla/mux"
|
||||
|
||||
// Create handler
|
||||
handler := restheadspec.NewHandlerWithGORM(db)
|
||||
|
||||
// IMPORTANT: Register models BEFORE setting up routes
|
||||
handler.Registry.RegisterModel("public.users", &User{})
|
||||
handler.Registry.RegisterModel("public.posts", &Post{})
|
||||
|
||||
// Setup routes
|
||||
router := mux.NewRouter()
|
||||
restheadspec.SetupMuxRoutes(router, handler, nil)
|
||||
|
||||
// Start server
|
||||
http.ListenAndServe(":8080", router)
|
||||
```
|
||||
|
||||
### Setup with Bun ORM
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/restheadspec"
|
||||
import "github.com/uptrace/bun"
|
||||
|
||||
// Create handler with Bun
|
||||
handler := restheadspec.NewHandlerWithBun(bunDB)
|
||||
|
||||
// Register models
|
||||
handler.Registry.RegisterModel("public.users", &User{})
|
||||
|
||||
// Setup routes (same as GORM)
|
||||
router := mux.NewRouter()
|
||||
restheadspec.SetupMuxRoutes(router, handler, nil)
|
||||
```
|
||||
|
||||
## Basic Usage
|
||||
|
||||
### Simple GET Request
|
||||
|
||||
```http
|
||||
GET /public/users HTTP/1.1
|
||||
Host: api.example.com
|
||||
X-Select-Fields: id,name,email
|
||||
X-FieldFilter-Status: active
|
||||
X-Sort: -created_at
|
||||
X-Limit: 50
|
||||
```
|
||||
|
||||
### With Preloading
|
||||
|
||||
```http
|
||||
GET /public/users HTTP/1.1
|
||||
X-Select-Fields: id,name,email,department_id
|
||||
X-Preload: department:id,name
|
||||
X-FieldFilter-Status: active
|
||||
X-Limit: 50
|
||||
```
|
||||
|
||||
## Common Headers
|
||||
|
||||
| Header | Description | Example |
|
||||
|--------|-------------|---------|
|
||||
| `X-Select-Fields` | Columns to include | `id,name,email` |
|
||||
| `X-Not-Select-Fields` | Columns to exclude | `password,internal_notes` |
|
||||
| `X-FieldFilter-{col}` | Exact match filter | `X-FieldFilter-Status: active` |
|
||||
| `X-SearchFilter-{col}` | Fuzzy search (ILIKE) | `X-SearchFilter-Name: john` |
|
||||
| `X-SearchOp-{op}-{col}` | Filter with operator | `X-SearchOp-Gte-Age: 18` |
|
||||
| `X-Preload` | Preload relations | `posts:id,title` |
|
||||
| `X-Sort` | Sort columns | `-created_at,+name` |
|
||||
| `X-Limit` | Limit results | `50` |
|
||||
| `X-Offset` | Offset for pagination | `100` |
|
||||
| `X-Clean-JSON` | Remove null/empty fields | `true` |
|
||||
| `X-Single-Record-As-Object` | Return single records as objects | `false` |
|
||||
|
||||
**Available Operators**: `eq`, `neq`, `gt`, `gte`, `lt`, `lte`, `contains`, `startswith`, `endswith`, `between`, `betweeninclusive`, `in`, `empty`, `notempty`
|
||||
|
||||
For complete header documentation, see [HEADERS.md](HEADERS.md).
|
||||
|
||||
## Lifecycle Hooks
|
||||
|
||||
RestHeadSpec supports lifecycle hooks for all CRUD operations:
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/restheadspec"
|
||||
|
||||
// Create handler
|
||||
handler := restheadspec.NewHandlerWithGORM(db)
|
||||
|
||||
// Register a before-read hook (e.g., for authorization)
|
||||
handler.Hooks.Register(restheadspec.BeforeRead, func(ctx *restheadspec.HookContext) error {
|
||||
// Check permissions
|
||||
if !userHasPermission(ctx.Context, ctx.Entity) {
|
||||
return fmt.Errorf("unauthorized access to %s", ctx.Entity)
|
||||
}
|
||||
|
||||
// Modify query options
|
||||
ctx.Options.Limit = ptr(100) // Enforce max limit
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
// Register an after-read hook (e.g., for data transformation)
|
||||
handler.Hooks.Register(restheadspec.AfterRead, func(ctx *restheadspec.HookContext) error {
|
||||
// Transform or filter results
|
||||
if users, ok := ctx.Result.([]User); ok {
|
||||
for i := range users {
|
||||
users[i].Email = maskEmail(users[i].Email)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// Register a before-create hook (e.g., for validation)
|
||||
handler.Hooks.Register(restheadspec.BeforeCreate, func(ctx *restheadspec.HookContext) error {
|
||||
// Validate data
|
||||
if user, ok := ctx.Data.(*User); ok {
|
||||
if user.Email == "" {
|
||||
return fmt.Errorf("email is required")
|
||||
}
|
||||
// Add timestamps
|
||||
user.CreatedAt = time.Now()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
**Available Hook Types**:
|
||||
* `BeforeRead`, `AfterRead`
|
||||
* `BeforeCreate`, `AfterCreate`
|
||||
* `BeforeUpdate`, `AfterUpdate`
|
||||
* `BeforeDelete`, `AfterDelete`
|
||||
|
||||
**HookContext** provides:
|
||||
* `Context`: Request context
|
||||
* `Handler`: Access to handler, database, and registry
|
||||
* `Schema`, `Entity`, `TableName`: Request info
|
||||
* `Model`: The registered model type
|
||||
* `Options`: Parsed request options (filters, sorting, etc.)
|
||||
* `ID`: Record ID (for single-record operations)
|
||||
* `Data`: Request data (for create/update)
|
||||
* `Result`: Operation result (for after hooks)
|
||||
* `Writer`: Response writer (allows hooks to modify response)
|
||||
|
||||
## Cursor Pagination
|
||||
|
||||
RestHeadSpec supports efficient cursor-based pagination for large datasets:
|
||||
|
||||
```http
|
||||
GET /public/posts HTTP/1.1
|
||||
X-Sort: -created_at,+id
|
||||
X-Limit: 50
|
||||
X-Cursor-Forward: <cursor_token>
|
||||
```
|
||||
|
||||
**How it works**:
|
||||
1. First request returns results + cursor token in response
|
||||
2. Subsequent requests use `X-Cursor-Forward` or `X-Cursor-Backward`
|
||||
3. Cursor maintains consistent ordering even with data changes
|
||||
4. Supports complex multi-column sorting
|
||||
|
||||
**Benefits over offset pagination**:
|
||||
* Consistent results when data changes
|
||||
* Better performance for large offsets
|
||||
* Prevents "skipped" or duplicate records
|
||||
* Works with complex sort expressions
|
||||
|
||||
**Example with hooks**:
|
||||
|
||||
```go
|
||||
// Enable cursor pagination in a hook
|
||||
handler.Hooks.Register(restheadspec.BeforeRead, func(ctx *restheadspec.HookContext) error {
|
||||
// For large tables, enforce cursor pagination
|
||||
if ctx.Entity == "posts" && ctx.Options.Offset != nil && *ctx.Options.Offset > 1000 {
|
||||
return fmt.Errorf("use cursor pagination for large offsets")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
## Response Formats
|
||||
|
||||
RestHeadSpec supports multiple response formats:
|
||||
|
||||
**1. Simple Format** (`X-SimpleApi: true`):
|
||||
|
||||
```json
|
||||
[
|
||||
{ "id": 1, "name": "John" },
|
||||
{ "id": 2, "name": "Jane" }
|
||||
]
|
||||
```
|
||||
|
||||
**2. Detail Format** (`X-DetailApi: true`, default):
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": [...],
|
||||
"metadata": {
|
||||
"total": 100,
|
||||
"filtered": 100,
|
||||
"limit": 50,
|
||||
"offset": 0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**3. Syncfusion Format** (`X-Syncfusion: true`):
|
||||
|
||||
```json
|
||||
{
|
||||
"result": [...],
|
||||
"count": 100
|
||||
}
|
||||
```
|
||||
|
||||
## Single Record as Object (Default Behavior)
|
||||
|
||||
By default, RestHeadSpec automatically converts single-element arrays into objects for cleaner API responses.
|
||||
|
||||
**Default behavior (enabled)**:
|
||||
|
||||
```http
|
||||
GET /public/users/123
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": { "id": 123, "name": "John", "email": "john@example.com" }
|
||||
}
|
||||
```
|
||||
|
||||
**To disable** (force arrays):
|
||||
|
||||
```http
|
||||
GET /public/users/123
|
||||
X-Single-Record-As-Object: false
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": [{ "id": 123, "name": "John", "email": "john@example.com" }]
|
||||
}
|
||||
```
|
||||
|
||||
**How it works**:
|
||||
* When a query returns exactly **one record**, it's returned as an object
|
||||
* When a query returns **multiple records**, they're returned as an array
|
||||
* Set `X-Single-Record-As-Object: false` to always receive arrays
|
||||
* Works with all response formats (simple, detail, syncfusion)
|
||||
* Applies to both read operations and create/update returning clauses
|
||||
|
||||
## CORS & OPTIONS Support
|
||||
|
||||
RestHeadSpec includes comprehensive CORS support for cross-origin requests:
|
||||
|
||||
**OPTIONS Method**:
|
||||
|
||||
```http
|
||||
OPTIONS /public/users HTTP/1.1
|
||||
```
|
||||
|
||||
Returns metadata with appropriate CORS headers:
|
||||
|
||||
```http
|
||||
Access-Control-Allow-Origin: *
|
||||
Access-Control-Allow-Methods: GET, POST, OPTIONS
|
||||
Access-Control-Allow-Headers: Content-Type, Authorization, X-Select-Fields, X-FieldFilter-*, ...
|
||||
Access-Control-Max-Age: 86400
|
||||
Access-Control-Allow-Credentials: true
|
||||
```
|
||||
|
||||
**Key Features**:
|
||||
* OPTIONS returns model metadata (same as GET metadata endpoint)
|
||||
* All HTTP methods include CORS headers automatically
|
||||
* OPTIONS requests don't require authentication (CORS preflight)
|
||||
* Supports all HeadSpec custom headers (`X-Select-Fields`, `X-FieldFilter-*`, etc.)
|
||||
* 24-hour max age to reduce preflight requests
|
||||
|
||||
**Configuration**:
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
|
||||
// Get default CORS config
|
||||
corsConfig := common.DefaultCORSConfig()
|
||||
|
||||
// Customize if needed
|
||||
corsConfig.AllowedOrigins = []string{"https://example.com"}
|
||||
corsConfig.AllowedMethods = []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}
|
||||
```
|
||||
|
||||
## Advanced Features
|
||||
|
||||
### Base64 Encoding
|
||||
|
||||
For complex header values, use base64 encoding:
|
||||
|
||||
```http
|
||||
GET /public/users HTTP/1.1
|
||||
X-Select-Fields-Base64: aWQsbmFtZSxlbWFpbA==
|
||||
```
|
||||
|
||||
### AND/OR Logic
|
||||
|
||||
Combine multiple filters with AND/OR logic:
|
||||
|
||||
```http
|
||||
GET /public/users HTTP/1.1
|
||||
X-FieldFilter-Status: active
|
||||
X-SearchOp-Gte-Age: 18
|
||||
X-Filter-Logic: AND
|
||||
```
|
||||
|
||||
### Complex Preloading
|
||||
|
||||
Load nested relationships:
|
||||
|
||||
```http
|
||||
GET /public/users HTTP/1.1
|
||||
X-Preload: posts:id,title,comments:id,text,author:name
|
||||
```
|
||||
|
||||
## Model Registration
|
||||
|
||||
```go
|
||||
type User struct {
|
||||
ID uint `json:"id" gorm:"primaryKey"`
|
||||
Name string `json:"name"`
|
||||
Email string `json:"email"`
|
||||
Posts []Post `json:"posts,omitempty" gorm:"foreignKey:UserID"`
|
||||
}
|
||||
|
||||
// Schema.Table format
|
||||
handler.Registry.RegisterModel("public.users", &User{})
|
||||
```
|
||||
|
||||
## Complete Example
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/restheadspec"
|
||||
"github.com/gorilla/mux"
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
type User struct {
|
||||
ID uint `json:"id" gorm:"primaryKey"`
|
||||
Name string `json:"name"`
|
||||
Email string `json:"email"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Connect to database
|
||||
db, err := gorm.Open(postgres.Open("your-connection-string"), &gorm.Config{})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Create handler
|
||||
handler := restheadspec.NewHandlerWithGORM(db)
|
||||
|
||||
// Register models
|
||||
handler.Registry.RegisterModel("public.users", &User{})
|
||||
|
||||
// Add hooks
|
||||
handler.Hooks.Register(restheadspec.BeforeRead, func(ctx *restheadspec.HookContext) error {
|
||||
log.Printf("Reading %s", ctx.Entity)
|
||||
return nil
|
||||
})
|
||||
|
||||
// Setup routes
|
||||
router := mux.NewRouter()
|
||||
restheadspec.SetupMuxRoutes(router, handler, nil)
|
||||
|
||||
// Start server
|
||||
log.Println("Server starting on :8080")
|
||||
log.Fatal(http.ListenAndServe(":8080", router))
|
||||
}
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
RestHeadSpec is designed for testability:
|
||||
|
||||
```go
|
||||
import (
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestUserRead(t *testing.T) {
|
||||
handler := restheadspec.NewHandlerWithGORM(testDB)
|
||||
handler.Registry.RegisterModel("public.users", &User{})
|
||||
|
||||
req := httptest.NewRequest("GET", "/public/users", nil)
|
||||
req.Header.Set("X-Select-Fields", "id,name")
|
||||
req.Header.Set("X-Limit", "10")
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
// Test your handler...
|
||||
}
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
* [HEADERS.md](HEADERS.md) - Complete header reference
|
||||
* [Main README](../../README.md) - ResolveSpec overview
|
||||
* [ResolveSpec Package](../resolvespec/README.md) - Body-based API
|
||||
* [StaticWeb Package](../server/staticweb/README.md) - Static file server
|
||||
|
||||
## License
|
||||
|
||||
This package is part of ResolveSpec and is licensed under the MIT License.
|
||||
193
pkg/restheadspec/empty_result_test.go
Normal file
193
pkg/restheadspec/empty_result_test.go
Normal file
@@ -0,0 +1,193 @@
|
||||
package restheadspec
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
)
|
||||
|
||||
// Test that normalizeResultArray returns empty array when no records found without ID
|
||||
func TestNormalizeResultArray_EmptyArrayWhenNoID(t *testing.T) {
|
||||
handler := &Handler{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
input interface{}
|
||||
shouldBeEmptyArr bool
|
||||
}{
|
||||
{
|
||||
name: "nil should return empty array",
|
||||
input: nil,
|
||||
shouldBeEmptyArr: true,
|
||||
},
|
||||
{
|
||||
name: "empty slice should return empty array",
|
||||
input: []*EmptyTestModel{},
|
||||
shouldBeEmptyArr: true,
|
||||
},
|
||||
{
|
||||
name: "single element should return the element",
|
||||
input: []*EmptyTestModel{{ID: 1, Name: "test"}},
|
||||
shouldBeEmptyArr: false,
|
||||
},
|
||||
{
|
||||
name: "multiple elements should return the slice",
|
||||
input: []*EmptyTestModel{
|
||||
{ID: 1, Name: "test1"},
|
||||
{ID: 2, Name: "test2"},
|
||||
},
|
||||
shouldBeEmptyArr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := handler.normalizeResultArray(tt.input)
|
||||
|
||||
// For cases that should return empty array
|
||||
if tt.shouldBeEmptyArr {
|
||||
emptyArr, ok := result.([]interface{})
|
||||
if !ok {
|
||||
t.Errorf("Expected empty array []interface{}{}, got %T: %v", result, result)
|
||||
return
|
||||
}
|
||||
if len(emptyArr) != 0 {
|
||||
t.Errorf("Expected empty array with length 0, got length %d", len(emptyArr))
|
||||
}
|
||||
|
||||
// Verify it serializes to [] and not null
|
||||
jsonBytes, err := json.Marshal(result)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to marshal result: %v", err)
|
||||
return
|
||||
}
|
||||
if string(jsonBytes) != "[]" {
|
||||
t.Errorf("Expected JSON '[]', got '%s'", string(jsonBytes))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test that sendFormattedResponse adds X-No-Data-Found header
|
||||
func TestSendFormattedResponse_NoDataFoundHeader(t *testing.T) {
|
||||
handler := &Handler{}
|
||||
|
||||
// Mock ResponseWriter
|
||||
mockWriter := &MockTestResponseWriter{
|
||||
headers: make(map[string]string),
|
||||
}
|
||||
|
||||
metadata := &common.Metadata{
|
||||
Total: 0,
|
||||
Count: 0,
|
||||
Filtered: 0,
|
||||
Limit: 10,
|
||||
Offset: 0,
|
||||
}
|
||||
|
||||
options := ExtendedRequestOptions{
|
||||
RequestOptions: common.RequestOptions{},
|
||||
}
|
||||
|
||||
// Test with empty data
|
||||
emptyData := []interface{}{}
|
||||
handler.sendFormattedResponse(mockWriter, emptyData, metadata, options)
|
||||
|
||||
// Check if X-No-Data-Found header was set
|
||||
if mockWriter.headers["X-No-Data-Found"] != "true" {
|
||||
t.Errorf("Expected X-No-Data-Found header to be 'true', got '%s'", mockWriter.headers["X-No-Data-Found"])
|
||||
}
|
||||
|
||||
// Verify the body is an empty array
|
||||
if mockWriter.body == nil {
|
||||
t.Error("Expected body to be set, got nil")
|
||||
} else {
|
||||
bodyBytes, err := json.Marshal(mockWriter.body)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to marshal body: %v", err)
|
||||
}
|
||||
// The body should be wrapped in a Response object with "data" field
|
||||
bodyStr := string(bodyBytes)
|
||||
if !strings.Contains(bodyStr, `"data":[]`) && !strings.Contains(bodyStr, `"result":[]`) {
|
||||
t.Errorf("Expected body to contain empty array, got: %s", bodyStr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test that sendResponseWithOptions adds X-No-Data-Found header
|
||||
func TestSendResponseWithOptions_NoDataFoundHeader(t *testing.T) {
|
||||
handler := &Handler{}
|
||||
|
||||
// Mock ResponseWriter
|
||||
mockWriter := &MockTestResponseWriter{
|
||||
headers: make(map[string]string),
|
||||
}
|
||||
|
||||
metadata := &common.Metadata{}
|
||||
options := &ExtendedRequestOptions{}
|
||||
|
||||
// Test with nil data
|
||||
handler.sendResponseWithOptions(mockWriter, nil, metadata, options)
|
||||
|
||||
// Check if X-No-Data-Found header was set
|
||||
if mockWriter.headers["X-No-Data-Found"] != "true" {
|
||||
t.Errorf("Expected X-No-Data-Found header to be 'true', got '%s'", mockWriter.headers["X-No-Data-Found"])
|
||||
}
|
||||
|
||||
// Check status code is 200
|
||||
if mockWriter.statusCode != 200 {
|
||||
t.Errorf("Expected status code 200, got %d", mockWriter.statusCode)
|
||||
}
|
||||
|
||||
// Verify the body is an empty array
|
||||
if mockWriter.body == nil {
|
||||
t.Error("Expected body to be set, got nil")
|
||||
} else {
|
||||
bodyBytes, err := json.Marshal(mockWriter.body)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to marshal body: %v", err)
|
||||
}
|
||||
bodyStr := string(bodyBytes)
|
||||
if bodyStr != "[]" {
|
||||
t.Errorf("Expected body to be '[]', got: %s", bodyStr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// MockTestResponseWriter for testing
|
||||
type MockTestResponseWriter struct {
|
||||
headers map[string]string
|
||||
statusCode int
|
||||
body interface{}
|
||||
}
|
||||
|
||||
func (m *MockTestResponseWriter) SetHeader(key, value string) {
|
||||
m.headers[key] = value
|
||||
}
|
||||
|
||||
func (m *MockTestResponseWriter) WriteHeader(statusCode int) {
|
||||
m.statusCode = statusCode
|
||||
}
|
||||
|
||||
func (m *MockTestResponseWriter) Write(data []byte) (int, error) {
|
||||
return len(data), nil
|
||||
}
|
||||
|
||||
func (m *MockTestResponseWriter) WriteJSON(data interface{}) error {
|
||||
m.body = data
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockTestResponseWriter) UnderlyingResponseWriter() http.ResponseWriter {
|
||||
return nil
|
||||
}
|
||||
|
||||
// EmptyTestModel for testing
|
||||
type EmptyTestModel struct {
|
||||
ID int64 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
@@ -2143,12 +2143,22 @@ func (h *Handler) sendResponse(w common.ResponseWriter, data interface{}, metada
|
||||
// sendResponseWithOptions sends a response with optional formatting
|
||||
func (h *Handler) sendResponseWithOptions(w common.ResponseWriter, data interface{}, metadata *common.Metadata, options *ExtendedRequestOptions) {
|
||||
w.SetHeader("Content-Type", "application/json")
|
||||
|
||||
// Handle nil data - convert to empty array
|
||||
if data == nil {
|
||||
data = map[string]interface{}{}
|
||||
w.WriteHeader(http.StatusPartialContent)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
data = []interface{}{}
|
||||
}
|
||||
|
||||
// Calculate data length after nil conversion
|
||||
dataLen := reflection.Len(data)
|
||||
|
||||
// Add X-No-Data-Found header when no records were found
|
||||
if dataLen == 0 {
|
||||
w.SetHeader("X-No-Data-Found", "true")
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
||||
// Normalize single-record arrays to objects if requested
|
||||
if options != nil && options.SingleRecordAsObject {
|
||||
data = h.normalizeResultArray(data)
|
||||
@@ -2165,7 +2175,7 @@ func (h *Handler) sendResponseWithOptions(w common.ResponseWriter, data interfac
|
||||
// Returns the single element if data is a slice/array with exactly one element, otherwise returns data unchanged
|
||||
func (h *Handler) normalizeResultArray(data interface{}) interface{} {
|
||||
if data == nil {
|
||||
return map[string]interface{}{}
|
||||
return []interface{}{}
|
||||
}
|
||||
|
||||
// Use reflection to check if data is a slice or array
|
||||
@@ -2180,15 +2190,15 @@ func (h *Handler) normalizeResultArray(data interface{}) interface{} {
|
||||
// Return the single element
|
||||
return dataValue.Index(0).Interface()
|
||||
} else if dataValue.Len() == 0 {
|
||||
// Return empty object instead of empty array
|
||||
return map[string]interface{}{}
|
||||
// Keep empty array as empty array, don't convert to empty object
|
||||
return []interface{}{}
|
||||
}
|
||||
}
|
||||
|
||||
if dataValue.Kind() == reflect.String {
|
||||
str := dataValue.String()
|
||||
if str == "" || str == "null" {
|
||||
return map[string]interface{}{}
|
||||
return []interface{}{}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -2199,16 +2209,25 @@ func (h *Handler) normalizeResultArray(data interface{}) interface{} {
|
||||
func (h *Handler) sendFormattedResponse(w common.ResponseWriter, data interface{}, metadata *common.Metadata, options ExtendedRequestOptions) {
|
||||
// Normalize single-record arrays to objects if requested
|
||||
httpStatus := http.StatusOK
|
||||
|
||||
// Handle nil data - convert to empty array
|
||||
if data == nil {
|
||||
data = map[string]interface{}{}
|
||||
httpStatus = http.StatusPartialContent
|
||||
} else {
|
||||
dataLen := reflection.Len(data)
|
||||
if dataLen == 0 {
|
||||
httpStatus = http.StatusPartialContent
|
||||
}
|
||||
data = []interface{}{}
|
||||
}
|
||||
|
||||
// Calculate data length after nil conversion
|
||||
// Note: This is done BEFORE normalization because X-No-Data-Found indicates
|
||||
// whether data was found in the database, not the final response format
|
||||
dataLen := reflection.Len(data)
|
||||
|
||||
// Add X-No-Data-Found header when no records were found
|
||||
if dataLen == 0 {
|
||||
w.SetHeader("X-No-Data-Found", "true")
|
||||
}
|
||||
|
||||
// Apply normalization after header is set
|
||||
// normalizeResultArray may convert single-element arrays to objects,
|
||||
// but the X-No-Data-Found header reflects the original query result
|
||||
if options.SingleRecordAsObject {
|
||||
data = h.normalizeResultArray(data)
|
||||
}
|
||||
|
||||
439
pkg/server/staticweb/README.md
Normal file
439
pkg/server/staticweb/README.md
Normal file
@@ -0,0 +1,439 @@
|
||||
# StaticWeb - Interface-Driven Static File Server
|
||||
|
||||
StaticWeb is a flexible, interface-driven Go package for serving static files over HTTP. It supports multiple filesystem backends (local, zip, embedded) and provides pluggable policies for caching, MIME types, and fallback strategies.
|
||||
|
||||
## Features
|
||||
|
||||
- **Router-Agnostic**: Works with any HTTP router through standard `http.Handler`
|
||||
- **Multiple Filesystem Providers**: Local directories, zip files, embedded filesystems
|
||||
- **Pluggable Policies**: Customizable cache, MIME type, and fallback strategies
|
||||
- **Thread-Safe**: Safe for concurrent use
|
||||
- **Resource Management**: Proper lifecycle management with `Close()` methods
|
||||
- **Extensible**: Easy to add new providers and policies
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
go get github.com/bitechdev/ResolveSpec/pkg/server/staticweb
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Basic Usage
|
||||
|
||||
Serve files from a local directory:
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/server/staticweb"
|
||||
|
||||
// Create service
|
||||
service := staticweb.NewService(nil)
|
||||
|
||||
// Mount a local directory
|
||||
provider, _ := staticweb.LocalProvider("./public")
|
||||
service.Mount(staticweb.MountConfig{
|
||||
URLPrefix: "/static",
|
||||
Provider: provider,
|
||||
})
|
||||
|
||||
// Use with any router
|
||||
router.PathPrefix("/").Handler(service.Handler())
|
||||
```
|
||||
|
||||
### Single Page Application (SPA)
|
||||
|
||||
Serve an SPA with HTML fallback routing:
|
||||
|
||||
```go
|
||||
service := staticweb.NewService(nil)
|
||||
|
||||
provider, _ := staticweb.LocalProvider("./dist")
|
||||
service.Mount(staticweb.MountConfig{
|
||||
URLPrefix: "/",
|
||||
Provider: provider,
|
||||
FallbackStrategy: staticweb.HTMLFallback("index.html"),
|
||||
})
|
||||
|
||||
// API routes take precedence (registered first)
|
||||
router.HandleFunc("/api/users", usersHandler)
|
||||
router.HandleFunc("/api/posts", postsHandler)
|
||||
|
||||
// Static files handle all other routes
|
||||
router.PathPrefix("/").Handler(service.Handler())
|
||||
```
|
||||
|
||||
## Filesystem Providers
|
||||
|
||||
### Local Directory
|
||||
|
||||
Serve files from a local filesystem directory:
|
||||
|
||||
```go
|
||||
provider, err := staticweb.LocalProvider("/var/www/static")
|
||||
```
|
||||
|
||||
### Zip File
|
||||
|
||||
Serve files from a zip archive:
|
||||
|
||||
```go
|
||||
provider, err := staticweb.ZipProvider("./static.zip")
|
||||
```
|
||||
|
||||
### Embedded Filesystem
|
||||
|
||||
Serve files from Go's embedded filesystem:
|
||||
|
||||
```go
|
||||
//go:embed assets
|
||||
var assets embed.FS
|
||||
|
||||
// Direct embedded FS
|
||||
provider, err := staticweb.EmbedProvider(&assets, "")
|
||||
|
||||
// Or from a zip file within embedded FS
|
||||
provider, err := staticweb.EmbedProvider(&assets, "assets.zip")
|
||||
```
|
||||
|
||||
## Cache Policies
|
||||
|
||||
### Simple Cache
|
||||
|
||||
Single TTL for all files:
|
||||
|
||||
```go
|
||||
cachePolicy := staticweb.SimpleCache(3600) // 1 hour
|
||||
```
|
||||
|
||||
### Extension-Based Cache
|
||||
|
||||
Different TTLs per file type:
|
||||
|
||||
```go
|
||||
rules := map[string]int{
|
||||
".html": 3600, // 1 hour
|
||||
".js": 86400, // 1 day
|
||||
".css": 86400, // 1 day
|
||||
".png": 604800, // 1 week
|
||||
}
|
||||
|
||||
cachePolicy := staticweb.ExtensionCache(rules, 3600) // default 1 hour
|
||||
```
|
||||
|
||||
### No Cache
|
||||
|
||||
Disable caching entirely:
|
||||
|
||||
```go
|
||||
cachePolicy := staticweb.NoCache()
|
||||
```
|
||||
|
||||
## Fallback Strategies
|
||||
|
||||
### HTML Fallback
|
||||
|
||||
Serve index.html for non-asset requests (SPA routing):
|
||||
|
||||
```go
|
||||
fallback := staticweb.HTMLFallback("index.html")
|
||||
```
|
||||
|
||||
### Extension-Based Fallback
|
||||
|
||||
Skip fallback for known static assets:
|
||||
|
||||
```go
|
||||
fallback := staticweb.DefaultExtensionFallback("index.html")
|
||||
```
|
||||
|
||||
Custom extensions:
|
||||
|
||||
```go
|
||||
staticExts := []string{".js", ".css", ".png", ".jpg"}
|
||||
fallback := staticweb.ExtensionFallback(staticExts, "index.html")
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Service Configuration
|
||||
|
||||
```go
|
||||
config := &staticweb.ServiceConfig{
|
||||
DefaultCacheTime: 3600,
|
||||
DefaultMIMETypes: map[string]string{
|
||||
".webp": "image/webp",
|
||||
".wasm": "application/wasm",
|
||||
},
|
||||
}
|
||||
|
||||
service := staticweb.NewService(config)
|
||||
```
|
||||
|
||||
### Mount Configuration
|
||||
|
||||
```go
|
||||
service.Mount(staticweb.MountConfig{
|
||||
URLPrefix: "/static",
|
||||
Provider: provider,
|
||||
CachePolicy: cachePolicy, // Optional
|
||||
MIMEResolver: mimeResolver, // Optional
|
||||
FallbackStrategy: fallbackStrategy, // Optional
|
||||
})
|
||||
```
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Multiple Mount Points
|
||||
|
||||
Serve different directories at different URL prefixes with different policies:
|
||||
|
||||
```go
|
||||
service := staticweb.NewService(nil)
|
||||
|
||||
// Long-lived assets
|
||||
assetsProvider, _ := staticweb.LocalProvider("./assets")
|
||||
service.Mount(staticweb.MountConfig{
|
||||
URLPrefix: "/assets",
|
||||
Provider: assetsProvider,
|
||||
CachePolicy: staticweb.SimpleCache(604800), // 1 week
|
||||
})
|
||||
|
||||
// Frequently updated HTML
|
||||
htmlProvider, _ := staticweb.LocalProvider("./public")
|
||||
service.Mount(staticweb.MountConfig{
|
||||
URLPrefix: "/",
|
||||
Provider: htmlProvider,
|
||||
CachePolicy: staticweb.SimpleCache(300), // 5 minutes
|
||||
})
|
||||
```
|
||||
|
||||
### Custom MIME Types
|
||||
|
||||
```go
|
||||
mimeResolver := staticweb.DefaultMIMEResolver()
|
||||
mimeResolver.RegisterMIMEType(".webp", "image/webp")
|
||||
mimeResolver.RegisterMIMEType(".wasm", "application/wasm")
|
||||
|
||||
service.Mount(staticweb.MountConfig{
|
||||
URLPrefix: "/static",
|
||||
Provider: provider,
|
||||
MIMEResolver: mimeResolver,
|
||||
})
|
||||
```
|
||||
|
||||
### Resource Cleanup
|
||||
|
||||
Always close the service when done:
|
||||
|
||||
```go
|
||||
service := staticweb.NewService(nil)
|
||||
defer service.Close()
|
||||
|
||||
// ... mount and use service ...
|
||||
```
|
||||
|
||||
Or unmount individual mount points:
|
||||
|
||||
```go
|
||||
service.Unmount("/static")
|
||||
```
|
||||
|
||||
### Reloading/Refreshing Content
|
||||
|
||||
Reload providers to pick up changes from the underlying filesystem. This is particularly useful for zip files in development:
|
||||
|
||||
```go
|
||||
// When zip file or directory contents change
|
||||
err := service.Reload()
|
||||
if err != nil {
|
||||
log.Printf("Failed to reload: %v", err)
|
||||
}
|
||||
```
|
||||
|
||||
Providers that support reloading:
|
||||
- **ZipFSProvider**: Reopens the zip file to pick up changes
|
||||
- **LocalFSProvider**: Refreshes the directory view (automatically picks up changes)
|
||||
- **EmbedFSProvider**: Not reloadable (embedded at compile time)
|
||||
|
||||
You can also reload individual providers:
|
||||
|
||||
```go
|
||||
if reloadable, ok := provider.(staticweb.ReloadableProvider); ok {
|
||||
err := reloadable.Reload()
|
||||
if err != nil {
|
||||
log.Printf("Failed to reload: %v", err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Development Workflow Example:**
|
||||
|
||||
```go
|
||||
service := staticweb.NewService(nil)
|
||||
|
||||
provider, _ := staticweb.ZipProvider("./dist.zip")
|
||||
service.Mount(staticweb.MountConfig{
|
||||
URLPrefix: "/app",
|
||||
Provider: provider,
|
||||
})
|
||||
|
||||
// In development, reload when dist.zip is rebuilt
|
||||
go func() {
|
||||
watcher := fsnotify.NewWatcher()
|
||||
watcher.Add("./dist.zip")
|
||||
|
||||
for range watcher.Events {
|
||||
log.Println("Reloading static files...")
|
||||
if err := service.Reload(); err != nil {
|
||||
log.Printf("Reload failed: %v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
```
|
||||
|
||||
## Router Integration
|
||||
|
||||
### Gorilla Mux
|
||||
|
||||
```go
|
||||
router := mux.NewRouter()
|
||||
router.HandleFunc("/api/users", usersHandler)
|
||||
router.PathPrefix("/").Handler(service.Handler())
|
||||
```
|
||||
|
||||
### Standard http.ServeMux
|
||||
|
||||
```go
|
||||
http.Handle("/api/", apiHandler)
|
||||
http.Handle("/", service.Handler())
|
||||
```
|
||||
|
||||
### BunRouter
|
||||
|
||||
```go
|
||||
router.GET("/api/users", usersHandler)
|
||||
router.GET("/*path", bunrouter.HTTPHandlerFunc(service.Handler()))
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
### Core Interfaces
|
||||
|
||||
#### FileSystemProvider
|
||||
|
||||
Abstracts the source of files:
|
||||
|
||||
```go
|
||||
type FileSystemProvider interface {
|
||||
Open(name string) (fs.File, error)
|
||||
Close() error
|
||||
Type() string
|
||||
}
|
||||
```
|
||||
|
||||
Implementations:
|
||||
- `LocalFSProvider` - Local directories
|
||||
- `ZipFSProvider` - Zip archives
|
||||
- `EmbedFSProvider` - Embedded filesystems
|
||||
|
||||
#### CachePolicy
|
||||
|
||||
Defines caching behavior:
|
||||
|
||||
```go
|
||||
type CachePolicy interface {
|
||||
GetCacheTime(path string) int
|
||||
GetCacheHeaders(path string) map[string]string
|
||||
}
|
||||
```
|
||||
|
||||
Implementations:
|
||||
- `SimpleCachePolicy` - Single TTL
|
||||
- `ExtensionBasedCachePolicy` - Per-extension TTL
|
||||
- `NoCachePolicy` - Disable caching
|
||||
|
||||
#### FallbackStrategy
|
||||
|
||||
Handles missing files:
|
||||
|
||||
```go
|
||||
type FallbackStrategy interface {
|
||||
ShouldFallback(path string) bool
|
||||
GetFallbackPath(path string) string
|
||||
}
|
||||
```
|
||||
|
||||
Implementations:
|
||||
- `NoFallback` - Return 404
|
||||
- `HTMLFallbackStrategy` - SPA routing
|
||||
- `ExtensionBasedFallback` - Skip known assets
|
||||
|
||||
#### MIMETypeResolver
|
||||
|
||||
Determines Content-Type:
|
||||
|
||||
```go
|
||||
type MIMETypeResolver interface {
|
||||
GetMIMEType(path string) string
|
||||
RegisterMIMEType(extension, mimeType string)
|
||||
}
|
||||
```
|
||||
|
||||
Implementations:
|
||||
- `DefaultMIMEResolver` - Common web types
|
||||
- `ConfigurableMIMEResolver` - Custom mappings
|
||||
|
||||
## Testing
|
||||
|
||||
### Mock Providers
|
||||
|
||||
```go
|
||||
import staticwebtesting "github.com/bitechdev/ResolveSpec/pkg/server/staticweb/testing"
|
||||
|
||||
provider := staticwebtesting.NewMockProvider(map[string][]byte{
|
||||
"index.html": []byte("<html>test</html>"),
|
||||
"app.js": []byte("console.log('test')"),
|
||||
})
|
||||
|
||||
service.Mount(staticweb.MountConfig{
|
||||
URLPrefix: "/",
|
||||
Provider: provider,
|
||||
})
|
||||
```
|
||||
|
||||
### Test Helpers
|
||||
|
||||
```go
|
||||
req := httptest.NewRequest("GET", "/index.html", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
service.Handler().ServeHTTP(rec, req)
|
||||
|
||||
// Assert response
|
||||
assert.Equal(t, 200, rec.Code)
|
||||
```
|
||||
|
||||
## Future Features
|
||||
|
||||
The interface-driven design allows for easy extensibility:
|
||||
|
||||
### Planned Providers
|
||||
|
||||
- **HTTPFSProvider**: Fetch files from remote HTTP servers with local caching
|
||||
- **S3FSProvider**: Serve files from S3-compatible storage
|
||||
- **CompositeProvider**: Fallback chain across multiple providers
|
||||
- **MemoryProvider**: In-memory filesystem for testing
|
||||
|
||||
### Planned Policies
|
||||
|
||||
- **TimedCachePolicy**: Different cache times by time of day
|
||||
- **ConditionalCachePolicy**: Smart cache based on file size/type
|
||||
- **RegexFallbackStrategy**: Pattern-based routing
|
||||
|
||||
## License
|
||||
|
||||
See the main repository for license information.
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are welcome! The interface-driven design makes it easy to add new providers and policies without modifying existing code.
|
||||
99
pkg/server/staticweb/config.go
Normal file
99
pkg/server/staticweb/config.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package staticweb
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"fmt"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/server/staticweb/policies"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/server/staticweb/providers"
|
||||
)
|
||||
|
||||
// ServiceConfig configures the static file service.
// A nil config is typically replaced by DefaultServiceConfig by callers.
type ServiceConfig struct {
	// DefaultCacheTime is the default cache duration in seconds.
	// Used when a mount point doesn't specify a custom CachePolicy.
	// Default: 172800 (48 hours).
	DefaultCacheTime int

	// DefaultMIMETypes is a map of file extensions to MIME types.
	// These are added to the default MIME resolver.
	// Extensions should include the leading dot (e.g., ".webp").
	DefaultMIMETypes map[string]string
}
|
||||
|
||||
// DefaultServiceConfig returns a ServiceConfig with sensible defaults.
|
||||
func DefaultServiceConfig() *ServiceConfig {
|
||||
return &ServiceConfig{
|
||||
DefaultCacheTime: 172800, // 48 hours
|
||||
DefaultMIMETypes: make(map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
// Validate checks if the ServiceConfig is valid.
|
||||
func (c *ServiceConfig) Validate() error {
|
||||
if c.DefaultCacheTime < 0 {
|
||||
return fmt.Errorf("DefaultCacheTime cannot be negative")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Helper constructor functions for providers
|
||||
|
||||
// LocalProvider creates a FileSystemProvider that serves files from a
// local filesystem directory rooted at path.
func LocalProvider(path string) (FileSystemProvider, error) {
	return providers.NewLocalFSProvider(path)
}
|
||||
|
||||
// ZipProvider creates a FileSystemProvider that serves files from the
// zip archive at zipPath.
func ZipProvider(zipPath string) (FileSystemProvider, error) {
	return providers.NewZipFSProvider(zipPath)
}
|
||||
|
||||
// EmbedProvider creates a FileSystemProvider for an embedded filesystem.
// If zipFile is empty, the embedded FS is served directly.
// If zipFile is non-empty, it is treated as the path of a zip archive
// inside the embedded FS, and files are served from within that archive.
func EmbedProvider(embedFS *embed.FS, zipFile string) (FileSystemProvider, error) {
	return providers.NewEmbedFSProvider(embedFS, zipFile)
}
|
||||
|
||||
// Policy constructor functions
|
||||
|
||||
// SimpleCache creates a cache policy that applies a single TTL (in
// seconds) to every file.
func SimpleCache(seconds int) CachePolicy {
	return policies.NewSimpleCachePolicy(seconds)
}
|
||||
|
||||
// ExtensionCache creates an extension-based cache policy.
// rules maps file extensions (with leading dot, e.g. ".js") to cache
// times in seconds; defaultTime is used for files that match no rule.
func ExtensionCache(rules map[string]int, defaultTime int) CachePolicy {
	return policies.NewExtensionBasedCachePolicy(rules, defaultTime)
}
|
||||
|
||||
// NoCache creates a cache policy that disables all caching.
func NoCache() CachePolicy {
	return policies.NewNoCachePolicy()
}
|
||||
|
||||
// HTMLFallback creates a fallback strategy for single-page applications
// that serves the given index file when a requested path is missing.
func HTMLFallback(indexFile string) FallbackStrategy {
	return policies.NewHTMLFallbackStrategy(indexFile)
}
|
||||
|
||||
// ExtensionFallback creates an extension-based fallback strategy.
// staticExtensions lists file extensions that should NOT use fallback
// (i.e. known static assets get a plain 404); fallbackPath is the file
// served when fallback is triggered.
func ExtensionFallback(staticExtensions []string, fallbackPath string) FallbackStrategy {
	return policies.NewExtensionBasedFallback(staticExtensions, fallbackPath)
}
|
||||
|
||||
// DefaultExtensionFallback creates an extension-based fallback strategy
// preconfigured with common web asset extensions.
func DefaultExtensionFallback(fallbackPath string) FallbackStrategy {
	return policies.NewDefaultExtensionBasedFallback(fallbackPath)
}
|
||||
|
||||
// DefaultMIMEResolver creates a MIME resolver preloaded with common web
// file types; additional types can be added via RegisterMIMEType.
func DefaultMIMEResolver() MIMETypeResolver {
	return policies.NewDefaultMIMEResolver()
}
|
||||
60
pkg/server/staticweb/example_reload_test.go
Normal file
60
pkg/server/staticweb/example_reload_test.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package staticweb_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/server/staticweb"
|
||||
staticwebtesting "github.com/bitechdev/ResolveSpec/pkg/server/staticweb/testing"
|
||||
)
|
||||
|
||||
// Example_reload demonstrates reloading content when files change.
func Example_reload() {
	service := staticweb.NewService(nil)

	// Create a provider (a mock stands in for a real zip/local provider).
	provider := staticwebtesting.NewMockProvider(map[string][]byte{
		"version.txt": []byte("v1.0.0"),
	})

	// Mount error deliberately ignored in this example.
	service.Mount(staticweb.MountConfig{
		URLPrefix: "/static",
		Provider:  provider,
	})

	// Simulate updating the file in the underlying source.
	provider.AddFile("version.txt", []byte("v2.0.0"))

	// Reload to pick up changes (in real usage with zip files).
	err := service.Reload()
	if err != nil {
		fmt.Printf("Failed to reload: %v\n", err)
	} else {
		fmt.Println("Successfully reloaded static files")
	}

	// Output: Successfully reloaded static files
}
|
||||
|
||||
// Example_reloadZip demonstrates reloading a zip file provider.
func Example_reloadZip() {
	service := staticweb.NewService(nil)

	// In production, you would use:
	// provider, _ := staticweb.ZipProvider("./dist.zip")
	// For this example, we use a mock.
	provider := staticwebtesting.NewMockProvider(map[string][]byte{
		"app.js": []byte("console.log('v1')"),
	})

	// Mount error deliberately ignored in this example.
	service.Mount(staticweb.MountConfig{
		URLPrefix: "/app",
		Provider:  provider,
	})

	fmt.Println("Serving from zip file")

	// When the zip file is updated, call Reload()
	// service.Reload()

	// Output: Serving from zip file
}
|
||||
138
pkg/server/staticweb/example_test.go
Normal file
138
pkg/server/staticweb/example_test.go
Normal file
@@ -0,0 +1,138 @@
|
||||
package staticweb_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/server/staticweb"
|
||||
staticwebtesting "github.com/bitechdev/ResolveSpec/pkg/server/staticweb/testing"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
// Example_basic demonstrates serving files from a local directory.
func Example_basic() {
	service := staticweb.NewService(nil)

	// Using mock provider for example purposes; production code would use
	// staticweb.LocalProvider("./public").
	provider := staticwebtesting.NewMockProvider(map[string][]byte{
		"index.html": []byte("<html>test</html>"),
	})

	_ = service.Mount(staticweb.MountConfig{
		URLPrefix: "/static",
		Provider:  provider,
	})

	// Register the service as the catch-all handler on any router.
	router := mux.NewRouter()
	router.PathPrefix("/").Handler(service.Handler())

	fmt.Println("Serving files from ./public at /static")
	// Output: Serving files from ./public at /static
}
|
||||
|
||||
// Example_spa demonstrates an SPA with HTML fallback routing.
func Example_spa() {
	service := staticweb.NewService(nil)

	// Using mock provider for example purposes.
	provider := staticwebtesting.NewMockProvider(map[string][]byte{
		"index.html": []byte("<html>app</html>"),
	})

	// HTMLFallback serves index.html for missing paths so client-side
	// routing keeps working on deep links.
	_ = service.Mount(staticweb.MountConfig{
		URLPrefix:        "/",
		Provider:         provider,
		FallbackStrategy: staticweb.HTMLFallback("index.html"),
	})

	router := mux.NewRouter()

	// API routes take precedence (registered first).
	router.HandleFunc("/api/users", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("users"))
	})

	// Static files handle all other routes.
	router.PathPrefix("/").Handler(service.Handler())

	fmt.Println("SPA with fallback to index.html")
	// Output: SPA with fallback to index.html
}
|
||||
|
||||
// Example_multiple demonstrates multiple mount points with different policies.
func Example_multiple() {
	service := staticweb.NewService(&staticweb.ServiceConfig{
		DefaultCacheTime: 3600,
	})

	// Assets with long cache (using mock for example).
	assetsProvider := staticwebtesting.NewMockProvider(map[string][]byte{
		"app.js": []byte("console.log('test')"),
	})
	// Mount errors deliberately ignored in this example.
	service.Mount(staticweb.MountConfig{
		URLPrefix:   "/assets",
		Provider:    assetsProvider,
		CachePolicy: staticweb.SimpleCache(604800), // 1 week
	})

	// HTML with short cache (using mock for example).
	htmlProvider := staticwebtesting.NewMockProvider(map[string][]byte{
		"index.html": []byte("<html>test</html>"),
	})
	service.Mount(staticweb.MountConfig{
		URLPrefix:   "/",
		Provider:    htmlProvider,
		CachePolicy: staticweb.SimpleCache(300), // 5 minutes
	})

	fmt.Println("Multiple mount points configured")
	// Output: Multiple mount points configured
}
|
||||
|
||||
// Example_zip demonstrates serving from a zip file (concept).
func Example_zip() {
	service := staticweb.NewService(nil)

	// For actual usage, you would use:
	// provider, err := staticweb.ZipProvider("./static.zip")
	// For this example, we use a mock.
	provider := staticwebtesting.NewMockProvider(map[string][]byte{
		"file.txt": []byte("content"),
	})

	// Mount error deliberately ignored in this example.
	service.Mount(staticweb.MountConfig{
		URLPrefix: "/static",
		Provider:  provider,
	})

	fmt.Println("Serving from zip file")
	// Output: Serving from zip file
}
|
||||
|
||||
// Example_extensionCache demonstrates extension-based caching.
func Example_extensionCache() {
	service := staticweb.NewService(nil)

	// Using mock provider for example purposes.
	provider := staticwebtesting.NewMockProvider(map[string][]byte{
		"index.html": []byte("<html>test</html>"),
		"app.js":     []byte("console.log('test')"),
	})

	// Different cache times per file type.
	cacheRules := map[string]int{
		".html": 3600,   // 1 hour
		".js":   86400,  // 1 day
		".css":  86400,  // 1 day
		".png":  604800, // 1 week
	}

	// Mount error deliberately ignored in this example.
	service.Mount(staticweb.MountConfig{
		URLPrefix:   "/",
		Provider:    provider,
		CachePolicy: staticweb.ExtensionCache(cacheRules, 3600), // default 1 hour
	})

	fmt.Println("Extension-based caching configured")
	// Output: Extension-based caching configured
}
|
||||
130
pkg/server/staticweb/interfaces.go
Normal file
130
pkg/server/staticweb/interfaces.go
Normal file
@@ -0,0 +1,130 @@
|
||||
package staticweb
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// FileSystemProvider abstracts the source of files (local, zip, embedded;
// future: http, s3). It is a superset of fs.FS, so providers can be used
// anywhere an fs.FS is expected.
// Implementations must be safe for concurrent use.
type FileSystemProvider interface {
	// Open opens the named file.
	// The name is always a slash-separated path relative to the
	// filesystem root, without a leading slash.
	Open(name string) (fs.File, error)

	// Close releases any resources held by the provider.
	// After Close is called, the provider should not be used.
	Close() error

	// Type returns the provider type (e.g., "local", "zip", "embed", "http", "s3").
	// This is primarily for debugging and logging purposes.
	Type() string
}
|
||||
|
||||
// ReloadableProvider is an optional interface that providers can implement
// to support reloading/refreshing their content.
// This is useful for development workflows where the underlying files may change.
type ReloadableProvider interface {
	FileSystemProvider

	// Reload refreshes the provider's content from the underlying source.
	// For zip files, this reopens the zip archive.
	// For local directories, this refreshes the filesystem view.
	// Returns an error if the reload fails.
	Reload() error
}
|
||||
|
||||
// CachePolicy defines how files should be cached by browsers and proxies.
// Implementations must be safe for concurrent use.
type CachePolicy interface {
	// GetCacheTime returns the cache duration in seconds for the given path.
	// A value of 0 means no caching.
	// A negative value can be used to indicate the browser should revalidate.
	GetCacheTime(path string) int

	// GetCacheHeaders returns additional cache-related HTTP headers for the
	// given path. Common headers include "Cache-Control", "Expires", "ETag".
	// Returns nil if no additional headers are needed.
	GetCacheHeaders(path string) map[string]string
}
|
||||
|
||||
// MIMETypeResolver determines the Content-Type for files.
// Implementations must be safe for concurrent use.
type MIMETypeResolver interface {
	// GetMIMEType returns the MIME type for the given file path.
	// Returns the empty string if the MIME type cannot be determined.
	GetMIMEType(path string) string

	// RegisterMIMEType registers a custom MIME type for the given file
	// extension. The extension should include the leading dot (e.g., ".webp").
	RegisterMIMEType(extension, mimeType string)
}
|
||||
|
||||
// FallbackStrategy handles requests for files that don't exist.
// This is commonly used for Single Page Applications (SPAs) that use
// client-side routing.
// Implementations must be safe for concurrent use.
type FallbackStrategy interface {
	// ShouldFallback determines if a fallback should be attempted for the
	// given path. Returns true if the request should be handled by
	// fallback logic.
	ShouldFallback(path string) bool

	// GetFallbackPath returns the path to serve instead of the originally
	// requested path. This is only called if ShouldFallback returns true.
	GetFallbackPath(path string) string
}
|
||||
|
||||
// MountConfig configures a single mount point.
// A mount point connects a URL prefix to a filesystem provider with
// optional per-mount policies.
type MountConfig struct {
	// URLPrefix is the URL path prefix where the filesystem should be mounted.
	// Must start with "/" (e.g., "/static", "/", "/assets").
	// Requests starting with this prefix will be handled by this mount point.
	URLPrefix string

	// Provider is the filesystem provider that supplies the files.
	// Required.
	Provider FileSystemProvider

	// CachePolicy determines how files should be cached.
	// If nil, the service's default cache policy is used.
	CachePolicy CachePolicy

	// MIMEResolver determines Content-Type headers for files.
	// If nil, the service's default MIME resolver is used.
	MIMEResolver MIMETypeResolver

	// FallbackStrategy handles requests for missing files.
	// If nil, no fallback is performed and 404 responses are returned.
	FallbackStrategy FallbackStrategy
}
|
||||
|
||||
// StaticFileService manages multiple mount points and serves static files.
// The service is safe for concurrent use.
type StaticFileService interface {
	// Mount adds a new mount point with the given configuration.
	// Returns an error if the URLPrefix is already mounted or if the
	// config is invalid.
	Mount(config MountConfig) error

	// Unmount removes the mount point at the given URL prefix.
	// Returns an error if no mount point exists at that prefix.
	// Automatically calls Close() on the provider to release resources.
	Unmount(urlPrefix string) error

	// ListMounts returns a sorted list of all mounted URL prefixes.
	ListMounts() []string

	// Reload reinitializes all filesystem providers.
	// This can be used to pick up changes in the underlying filesystems.
	// Not all providers may support reloading.
	Reload() error

	// Close releases all resources held by the service and all mounted
	// providers. After Close is called, the service should not be used.
	Close() error

	// Handler returns an http.Handler that serves static files from all
	// mount points. The handler performs longest-prefix matching to find
	// the appropriate mount point. If no mount point matches, the handler
	// returns without writing a response, allowing other handlers (like
	// API routes) to process the request.
	Handler() http.Handler
}
|
||||
235
pkg/server/staticweb/mount.go
Normal file
235
pkg/server/staticweb/mount.go
Normal file
@@ -0,0 +1,235 @@
|
||||
package staticweb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// mountPoint represents a mounted filesystem at a specific URL prefix,
// bundling the provider with its per-mount policies.
type mountPoint struct {
	urlPrefix        string             // URL path prefix this mount serves (e.g. "/static")
	provider         FileSystemProvider // source of the files
	cachePolicy      CachePolicy        // cache headers; may be the built-in default
	mimeResolver     MIMETypeResolver   // Content-Type lookup; may be the built-in default
	fallbackStrategy FallbackStrategy   // optional SPA-style fallback; nil means plain 404
	fileServer       http.Handler       // NOTE(review): set in newMountPoint but never used by ServeHTTP — candidate for removal
}
|
||||
|
||||
// newMountPoint creates a new mount point with the given configuration.
|
||||
func newMountPoint(config MountConfig, defaults *ServiceConfig) (*mountPoint, error) {
|
||||
if config.URLPrefix == "" {
|
||||
return nil, fmt.Errorf("URLPrefix cannot be empty")
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(config.URLPrefix, "/") {
|
||||
return nil, fmt.Errorf("URLPrefix must start with /")
|
||||
}
|
||||
|
||||
if config.Provider == nil {
|
||||
return nil, fmt.Errorf("provider cannot be nil")
|
||||
}
|
||||
|
||||
mp := &mountPoint{
|
||||
urlPrefix: config.URLPrefix,
|
||||
provider: config.Provider,
|
||||
cachePolicy: config.CachePolicy,
|
||||
mimeResolver: config.MIMEResolver,
|
||||
fallbackStrategy: config.FallbackStrategy,
|
||||
}
|
||||
|
||||
// Apply defaults if policies are not specified
|
||||
if mp.cachePolicy == nil && defaults != nil {
|
||||
mp.cachePolicy = defaultCachePolicy(defaults.DefaultCacheTime)
|
||||
}
|
||||
|
||||
if mp.mimeResolver == nil {
|
||||
mp.mimeResolver = defaultMIMEResolver()
|
||||
}
|
||||
|
||||
// Create an http.FileServer for serving files
|
||||
mp.fileServer = http.FileServer(http.FS(config.Provider))
|
||||
|
||||
return mp, nil
|
||||
}
|
||||
|
||||
// ServeHTTP handles HTTP requests for files in this mount point.
// It strips the mount's URL prefix, opens the resulting path from the
// provider, applies the mount's FallbackStrategy (if any) when the file
// is missing, and otherwise responds 404.
func (m *mountPoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Strip the URL prefix to get the provider-relative file path.
	filePath := strings.TrimPrefix(r.URL.Path, m.urlPrefix)
	if filePath == "" {
		filePath = "/"
	}

	// Clean the path. NOTE(review): for rooted paths Clean collapses ".."
	// at the root, but full traversal safety ultimately depends on the
	// provider rejecting escapes — confirm providers guard against this.
	filePath = path.Clean(filePath)

	// Try to open the file (provider paths are slash-separated without a
	// leading slash).
	file, err := m.provider.Open(strings.TrimPrefix(filePath, "/"))
	if err != nil {
		// File doesn't exist - check if we should use fallback (SPA routing).
		if m.fallbackStrategy != nil && m.fallbackStrategy.ShouldFallback(filePath) {
			fallbackPath := m.fallbackStrategy.GetFallbackPath(filePath)
			file, err = m.provider.Open(strings.TrimPrefix(fallbackPath, "/"))
			if err == nil {
				// Successfully opened fallback file.
				defer file.Close()
				m.serveFile(w, r, fallbackPath, file)
				return
			}
		}

		// No fallback configured, or the fallback file is also missing.
		http.NotFound(w, r)
		return
	}
	defer file.Close()

	// Serve the file with MIME and cache headers applied.
	m.serveFile(w, r, filePath, file)
}
|
||||
|
||||
// serveFile serves a single file with appropriate headers.
|
||||
func (m *mountPoint) serveFile(w http.ResponseWriter, r *http.Request, filePath string, file fs.File) {
|
||||
// Get file info
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// If it's a directory, try to serve index.html
|
||||
if stat.IsDir() {
|
||||
indexPath := path.Join(filePath, "index.html")
|
||||
indexFile, err := m.provider.Open(strings.TrimPrefix(indexPath, "/"))
|
||||
if err != nil {
|
||||
http.Error(w, "Forbidden", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
defer indexFile.Close()
|
||||
|
||||
indexStat, err := indexFile.Stat()
|
||||
if err != nil {
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
filePath = indexPath
|
||||
stat = indexStat
|
||||
file = indexFile
|
||||
}
|
||||
|
||||
// Set Content-Type header using MIME resolver
|
||||
if m.mimeResolver != nil {
|
||||
if mimeType := m.mimeResolver.GetMIMEType(filePath); mimeType != "" {
|
||||
w.Header().Set("Content-Type", mimeType)
|
||||
}
|
||||
}
|
||||
|
||||
// Apply cache policy
|
||||
if m.cachePolicy != nil {
|
||||
headers := m.cachePolicy.GetCacheHeaders(filePath)
|
||||
for key, value := range headers {
|
||||
w.Header().Set(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
// Serve the content
|
||||
if seeker, ok := file.(interface {
|
||||
io.ReadSeeker
|
||||
}); ok {
|
||||
http.ServeContent(w, r, stat.Name(), stat.ModTime(), seeker)
|
||||
} else {
|
||||
// If the file doesn't support seeking, we need to read it all into memory
|
||||
data, err := fs.ReadFile(m.provider, strings.TrimPrefix(filePath, "/"))
|
||||
if err != nil {
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
http.ServeContent(w, r, stat.Name(), stat.ModTime(), strings.NewReader(string(data)))
|
||||
}
|
||||
}
|
||||
|
||||
// Close releases resources held by the mount point.
|
||||
func (m *mountPoint) Close() error {
|
||||
if m.provider != nil {
|
||||
return m.provider.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// defaultCachePolicy creates a default simple cache policy.
|
||||
func defaultCachePolicy(cacheTime int) CachePolicy {
|
||||
// Import the policies package type - we'll need to use the concrete type
|
||||
// For now, create a simple inline implementation
|
||||
return &simpleCachePolicy{cacheTime: cacheTime}
|
||||
}
|
||||
|
||||
// simpleCachePolicy is a simple inline implementation of CachePolicy
// that applies one TTL to every file.
type simpleCachePolicy struct {
	cacheTime int // cache duration in seconds; <= 0 disables caching
}

// GetCacheTime returns the configured cache duration in seconds for any path.
func (p *simpleCachePolicy) GetCacheTime(path string) int {
	return p.cacheTime
}

// GetCacheHeaders returns HTTP headers implementing the policy: a public
// max-age directive when caching is enabled, otherwise no-store headers
// that also cover HTTP/1.0 clients (Pragma, Expires).
func (p *simpleCachePolicy) GetCacheHeaders(path string) map[string]string {
	if p.cacheTime > 0 {
		return map[string]string{
			"Cache-Control": fmt.Sprintf("public, max-age=%d", p.cacheTime),
		}
	}
	return map[string]string{
		"Cache-Control": "no-cache, no-store, must-revalidate",
		"Pragma":        "no-cache",
		"Expires":       "0",
	}
}
|
||||
|
||||
// defaultMIMEResolver creates a default MIME resolver.
|
||||
func defaultMIMEResolver() MIMETypeResolver {
|
||||
// Import the policies package type - we'll need to use the concrete type
|
||||
// For now, create a simple inline implementation
|
||||
return &simpleMIMEResolver{
|
||||
types: map[string]string{
|
||||
".js": "application/javascript",
|
||||
".mjs": "application/javascript",
|
||||
".cjs": "application/javascript",
|
||||
".css": "text/css",
|
||||
".html": "text/html",
|
||||
".htm": "text/html",
|
||||
".json": "application/json",
|
||||
".png": "image/png",
|
||||
".jpg": "image/jpeg",
|
||||
".jpeg": "image/jpeg",
|
||||
".gif": "image/gif",
|
||||
".svg": "image/svg+xml",
|
||||
".ico": "image/x-icon",
|
||||
".txt": "text/plain",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// simpleMIMEResolver is a simple inline implementation of MIMETypeResolver
// backed by an extension -> MIME type map.
type simpleMIMEResolver struct {
	types map[string]string // keys are lowercase extensions including the leading dot
}

// GetMIMEType returns the MIME type registered for filePath's extension,
// or "" when the extension is unknown. Matching is case-insensitive.
func (r *simpleMIMEResolver) GetMIMEType(filePath string) string {
	ext := strings.ToLower(path.Ext(filePath))
	if mimeType, ok := r.types[ext]; ok {
		return mimeType
	}
	return ""
}

// RegisterMIMEType associates a file extension with a MIME type.
// The extension may be given with or without a leading dot and is
// stored lowercased so lookups are case-insensitive. The map is
// initialized lazily so the zero value of simpleMIMEResolver is usable
// (the original panicked when types was nil).
func (r *simpleMIMEResolver) RegisterMIMEType(extension, mimeType string) {
	if !strings.HasPrefix(extension, ".") {
		extension = "." + extension
	}
	if r.types == nil {
		r.types = make(map[string]string)
	}
	r.types[strings.ToLower(extension)] = mimeType
}
|
||||
592
pkg/server/staticweb/plan.md
Normal file
592
pkg/server/staticweb/plan.md
Normal file
@@ -0,0 +1,592 @@
|
||||
# StaticWeb Package Interface-Driven Refactoring Plan
|
||||
|
||||
## Overview
|
||||
Refactor `pkg/server/staticweb` to be interface-driven, router-agnostic, and maintainable. This is a breaking change that replaces the existing API with a cleaner design.
|
||||
|
||||
## User Requirements
|
||||
- ✅ Work with any server (not just Gorilla mux)
|
||||
- ✅ Serve static files from zip files or directories
|
||||
- ✅ Support embedded and local filesystems
|
||||
- ✅ Interface-driven and maintainable architecture
|
||||
- ✅ Struct-based configuration
|
||||
- ✅ Breaking changes acceptable
|
||||
- 🔮 Remote HTTP/HTTPS and S3 support (future feature)
|
||||
|
||||
## Design Principles
|
||||
1. **Interface-first**: Define clear interfaces for all abstractions
|
||||
2. **Composition over inheritance**: Combine small, focused components
|
||||
3. **Router-agnostic**: Return standard `http.Handler` for universal compatibility
|
||||
4. **Configurable policies**: Extract hardcoded behavior into pluggable strategies
|
||||
5. **Resource safety**: Proper lifecycle management with `Close()` methods
|
||||
6. **Testability**: Mock-friendly interfaces with clear boundaries
|
||||
7. **Extensibility**: Easy to add new providers (HTTP, S3, etc.) in future
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
### Core Interfaces (pkg/server/staticweb/interfaces.go)
|
||||
|
||||
```go
|
||||
// FileSystemProvider abstracts file sources (local, zip, embedded, future: http, s3)
|
||||
type FileSystemProvider interface {
|
||||
Open(name string) (fs.File, error)
|
||||
Close() error
|
||||
Type() string
|
||||
}
|
||||
|
||||
// CachePolicy defines caching behavior
|
||||
type CachePolicy interface {
|
||||
GetCacheTime(path string) int
|
||||
GetCacheHeaders(path string) map[string]string
|
||||
}
|
||||
|
||||
// MIMETypeResolver determines content types
|
||||
type MIMETypeResolver interface {
|
||||
GetMIMEType(path string) string
|
||||
RegisterMIMEType(extension, mimeType string)
|
||||
}
|
||||
|
||||
// FallbackStrategy handles missing files
|
||||
type FallbackStrategy interface {
|
||||
ShouldFallback(path string) bool
|
||||
GetFallbackPath(path string) string
|
||||
}
|
||||
|
||||
// StaticFileService manages mount points
|
||||
type StaticFileService interface {
|
||||
Mount(config MountConfig) error
|
||||
Unmount(urlPrefix string) error
|
||||
ListMounts() []string
|
||||
Reload() error
|
||||
Close() error
|
||||
Handler() http.Handler // Router-agnostic integration
|
||||
}
|
||||
```
|
||||
|
||||
### Configuration (pkg/server/staticweb/config.go)
|
||||
|
||||
```go
|
||||
// MountConfig configures a single mount point
|
||||
type MountConfig struct {
|
||||
URLPrefix string
|
||||
Provider FileSystemProvider
|
||||
CachePolicy CachePolicy // Optional, uses default if nil
|
||||
MIMEResolver MIMETypeResolver // Optional, uses default if nil
|
||||
FallbackStrategy FallbackStrategy // Optional, no fallback if nil
|
||||
}
|
||||
|
||||
// ServiceConfig configures the service
|
||||
type ServiceConfig struct {
|
||||
DefaultCacheTime int // Default: 48 hours
|
||||
DefaultMIMETypes map[string]string // Additional MIME types
|
||||
}
|
||||
```
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Step 1: Create Core Interfaces
|
||||
**File**: `/home/hein/hein/dev/ResolveSpec/pkg/server/staticweb/interfaces.go` (NEW)
|
||||
|
||||
Define all interfaces listed above. This establishes the contract for all components.
|
||||
|
||||
### Step 2: Implement Default Policies
|
||||
**Directory**: `/home/hein/hein/dev/ResolveSpec/pkg/server/staticweb/policies/` (NEW)
|
||||
|
||||
**File**: `policies/cache.go`
|
||||
- `SimpleCachePolicy` - Single TTL for all files
|
||||
- `ExtensionBasedCachePolicy` - Different TTL per file extension
|
||||
- `NoCachePolicy` - Disables caching
|
||||
|
||||
**File**: `policies/mime.go`
|
||||
- `DefaultMIMEResolver` - Standard web MIME types + stdlib
|
||||
- `ConfigurableMIMEResolver` - User-defined mappings
|
||||
- Migrate hardcoded MIME types from `InitMimeTypes()` (lines 60-76)
|
||||
|
||||
**File**: `policies/fallback.go`
|
||||
- `NoFallback` - Returns 404 for missing files
|
||||
- `HTMLFallbackStrategy` - SPA routing (serves index.html)
|
||||
- `ExtensionBasedFallback` - Current behavior (checks extensions)
|
||||
- Migrate logic from `StaticHTMLFallbackHandler()` (lines 241-285)
|
||||
|
||||
### Step 3: Implement FileSystem Providers
|
||||
**Directory**: `/home/hein/hein/dev/ResolveSpec/pkg/server/staticweb/providers/` (NEW)
|
||||
|
||||
**File**: `providers/local.go`
|
||||
```go
|
||||
type LocalFSProvider struct {
|
||||
path string
|
||||
fs fs.FS
|
||||
}
|
||||
|
||||
func NewLocalFSProvider(path string) (*LocalFSProvider, error)
|
||||
func (l *LocalFSProvider) Open(name string) (fs.File, error)
|
||||
func (l *LocalFSProvider) Close() error
|
||||
func (l *LocalFSProvider) Type() string
|
||||
```
|
||||
|
||||
**File**: `providers/zip.go`
|
||||
```go
|
||||
type ZipFSProvider struct {
|
||||
zipPath string
|
||||
zipReader *zip.ReadCloser
|
||||
zipFS *zipfs.ZipFS
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func NewZipFSProvider(zipPath string) (*ZipFSProvider, error)
|
||||
func (z *ZipFSProvider) Open(name string) (fs.File, error)
|
||||
func (z *ZipFSProvider) Close() error
|
||||
func (z *ZipFSProvider) Type() string
|
||||
```
|
||||
- Integrates with existing `pkg/server/zipfs/zipfs.go`
|
||||
- Manages zip file lifecycle properly
|
||||
|
||||
**File**: `providers/embed.go`
|
||||
```go
|
||||
type EmbedFSProvider struct {
|
||||
embedFS *embed.FS
|
||||
zipFile string // Optional: path within embedded FS to zip file
|
||||
zipReader *zip.ReadCloser
|
||||
fs fs.FS
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func NewEmbedFSProvider(embedFS *embed.FS, zipFile string) (*EmbedFSProvider, error)
|
||||
func (e *EmbedFSProvider) Open(name string) (fs.File, error)
|
||||
func (e *EmbedFSProvider) Close() error
|
||||
func (e *EmbedFSProvider) Type() string
|
||||
```
|
||||
|
||||
### Step 4: Implement Mount Point
|
||||
**File**: `/home/hein/hein/dev/ResolveSpec/pkg/server/staticweb/mount.go` (NEW)
|
||||
|
||||
```go
|
||||
type mountPoint struct {
|
||||
urlPrefix string
|
||||
provider FileSystemProvider
|
||||
cachePolicy CachePolicy
|
||||
mimeResolver MIMETypeResolver
|
||||
fallbackStrategy FallbackStrategy
|
||||
}
|
||||
|
||||
func newMountPoint(config MountConfig, defaults *ServiceConfig) (*mountPoint, error)
|
||||
func (m *mountPoint) ServeHTTP(w http.ResponseWriter, r *http.Request)
|
||||
func (m *mountPoint) Close() error
|
||||
```
|
||||
|
||||
**Key behaviors**:
|
||||
- Strips URL prefix before passing to provider
|
||||
- Applies cache headers via `CachePolicy`
|
||||
- Sets Content-Type via `MIMETypeResolver`
|
||||
- Falls back via `FallbackStrategy` if file not found
|
||||
- Integrates with `http.FileServer()` for actual serving
|
||||
|
||||
### Step 5: Implement Service
|
||||
**File**: `/home/hein/hein/dev/ResolveSpec/pkg/server/staticweb/service.go` (NEW)
|
||||
|
||||
```go
|
||||
type service struct {
|
||||
mounts map[string]*mountPoint // urlPrefix -> mount
|
||||
config *ServiceConfig
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func NewService(config *ServiceConfig) StaticFileService
|
||||
func (s *service) Mount(config MountConfig) error
|
||||
func (s *service) Unmount(urlPrefix string) error
|
||||
func (s *service) ListMounts() []string
|
||||
func (s *service) Reload() error
|
||||
func (s *service) Close() error
|
||||
func (s *service) Handler() http.Handler
|
||||
```
|
||||
|
||||
**Handler Implementation**:
|
||||
- Performs longest-prefix matching to find mount point
|
||||
- Delegates to mount point's `ServeHTTP()`
|
||||
- Returns silently if no match (allows API routes to handle)
|
||||
- Thread-safe with `sync.RWMutex`
|
||||
|
||||
### Step 6: Create Configuration Helpers
|
||||
**File**: `/home/hein/hein/dev/ResolveSpec/pkg/server/staticweb/config.go` (NEW)
|
||||
|
||||
```go
|
||||
// Default configurations
|
||||
func DefaultServiceConfig() *ServiceConfig
|
||||
func DefaultCachePolicy() CachePolicy
|
||||
func DefaultMIMEResolver() MIMETypeResolver
|
||||
|
||||
// Helper constructors
|
||||
func LocalProvider(path string) FileSystemProvider
|
||||
func ZipProvider(zipPath string) FileSystemProvider
|
||||
func EmbedProvider(embedFS *embed.FS, zipFile string) FileSystemProvider
|
||||
|
||||
// Policy constructors
|
||||
func SimpleCache(seconds int) CachePolicy
|
||||
func ExtensionCache(rules map[string]int) CachePolicy
|
||||
func HTMLFallback(indexFile string) FallbackStrategy
|
||||
func ExtensionFallback(staticExtensions []string) FallbackStrategy
|
||||
```
|
||||
|
||||
### Step 7: Update/Remove Existing Files
|
||||
|
||||
**REMOVE**: `/home/hein/hein/dev/ResolveSpec/pkg/server/staticweb/staticweb.go`
|
||||
- All functionality migrated to new interface-based design
|
||||
- No backward compatibility needed per user request
|
||||
|
||||
**KEEP**: `/home/hein/hein/dev/ResolveSpec/pkg/server/zipfs/zipfs.go`
|
||||
- Still used by `ZipFSProvider`
|
||||
- Already implements `fs.FS` interface correctly
|
||||
|
||||
### Step 8: Create Examples and Tests
|
||||
|
||||
**File**: `/home/hein/hein/dev/ResolveSpec/pkg/server/staticweb/example_test.go` (NEW)
|
||||
|
||||
```go
|
||||
func ExampleService_basic() { /* Serve local directory */ }
|
||||
func ExampleService_spa() { /* SPA with fallback */ }
|
||||
func ExampleService_multiple() { /* Multiple mount points */ }
|
||||
func ExampleService_zip() { /* Serve from zip file */ }
|
||||
func ExampleService_embedded() { /* Serve from embedded zip */ }
|
||||
```
|
||||
|
||||
**File**: `/home/hein/hein/dev/ResolveSpec/pkg/server/staticweb/service_test.go` (NEW)
|
||||
- Test mount/unmount operations
|
||||
- Test longest-prefix matching
|
||||
- Test concurrent access
|
||||
- Test resource cleanup
|
||||
|
||||
**File**: `/home/hein/hein/dev/ResolveSpec/pkg/server/staticweb/providers/providers_test.go` (NEW)
|
||||
- Test each provider implementation
|
||||
- Test resource cleanup
|
||||
- Test error handling
|
||||
|
||||
**File**: `/home/hein/hein/dev/ResolveSpec/pkg/server/staticweb/testing/mocks.go` (NEW)
|
||||
- `MockFileSystemProvider` - In-memory file storage
|
||||
- `MockCachePolicy` - Configurable cache behavior
|
||||
- `MockMIMEResolver` - Custom MIME mappings
|
||||
- Test helpers for common scenarios
|
||||
|
||||
### Step 9: Create Documentation
|
||||
|
||||
**File**: `/home/hein/hein/dev/ResolveSpec/pkg/server/staticweb/README.md` (NEW)
|
||||
|
||||
Document:
|
||||
- Quick start examples
|
||||
- Interface overview
|
||||
- Provider implementations
|
||||
- Policy customization
|
||||
- Router integration patterns
|
||||
- Migration guide from old API
|
||||
- Future features roadmap
|
||||
|
||||
## Key Improvements Over Current Implementation
|
||||
|
||||
### 1. Router-Agnostic Design
|
||||
**Before**: Coupled to Gorilla mux via `RegisterRoutes(*mux.Router)`
|
||||
**After**: Returns `http.Handler`, works with any router
|
||||
|
||||
```go
|
||||
// Works with Gorilla Mux
|
||||
muxRouter.PathPrefix("/").Handler(service.Handler())
|
||||
|
||||
// Works with standard http.ServeMux
|
||||
http.Handle("/", service.Handler())
|
||||
|
||||
// Works with any http.Handler-compatible router
|
||||
```
|
||||
|
||||
### 2. Configurable Behaviors
|
||||
**Before**: Hardcoded MIME types, cache times, file extensions
|
||||
**After**: Pluggable policies
|
||||
|
||||
```go
|
||||
// Custom cache per file type
|
||||
cachePolicy := ExtensionCache(map[string]int{
|
||||
".html": 3600, // 1 hour
|
||||
".js": 86400, // 1 day
|
||||
".css": 86400, // 1 day
|
||||
".png": 604800, // 1 week
|
||||
})
|
||||
|
||||
// Custom fallback logic
|
||||
fallback := HTMLFallback("index.html")
|
||||
|
||||
service.Mount(MountConfig{
|
||||
URLPrefix: "/",
|
||||
Provider: LocalProvider("./dist"),
|
||||
CachePolicy: cachePolicy,
|
||||
FallbackStrategy: fallback,
|
||||
})
|
||||
```
|
||||
|
||||
### 3. Better Resource Management
|
||||
**Before**: Manual zip file cleanup, easy to leak resources
|
||||
**After**: Proper lifecycle with `Close()` on all components
|
||||
|
||||
```go
|
||||
defer service.Close() // Cleans up all providers
|
||||
```
|
||||
|
||||
### 4. Testability
|
||||
**Before**: Hard to test, coupled to filesystem
|
||||
**After**: Mock providers for testing
|
||||
|
||||
```go
|
||||
mockProvider := testing.NewInMemoryProvider(map[string][]byte{
|
||||
"index.html": []byte("<html>test</html>"),
|
||||
})
|
||||
|
||||
service.Mount(MountConfig{
|
||||
URLPrefix: "/",
|
||||
Provider: mockProvider,
|
||||
})
|
||||
```
|
||||
|
||||
### 5. Extensibility
|
||||
**Before**: Need to modify code to support new file sources
|
||||
**After**: Implement `FileSystemProvider` interface
|
||||
|
||||
```go
|
||||
// Future: Add HTTP provider without changing core code
|
||||
type HTTPFSProvider struct { /* ... */ }
|
||||
func (h *HTTPFSProvider) Open(name string) (fs.File, error) { /* ... */ }
|
||||
func (h *HTTPFSProvider) Close() error { /* ... */ }
|
||||
func (h *HTTPFSProvider) Type() string { return "http" }
|
||||
```
|
||||
|
||||
## Future Features (To Implement Later)
|
||||
|
||||
### Remote HTTP/HTTPS Provider
|
||||
**File**: `providers/http.go` (FUTURE)
|
||||
|
||||
Serve static files from remote HTTP servers with local caching:
|
||||
|
||||
```go
|
||||
type HTTPFSProvider struct {
|
||||
baseURL string
|
||||
httpClient *http.Client
|
||||
cache LocalCache // Optional disk/memory cache
|
||||
cacheTTL time.Duration
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// Example usage
|
||||
service.Mount(MountConfig{
|
||||
URLPrefix: "/cdn",
|
||||
Provider: HTTPProvider("https://cdn.example.com/assets"),
|
||||
})
|
||||
```
|
||||
|
||||
**Features**:
|
||||
- Fetch files from remote URLs on-demand
|
||||
- Local cache to reduce remote requests
|
||||
- Configurable TTL and cache eviction
|
||||
- HEAD request support for metadata
|
||||
- Retry logic and timeout handling
|
||||
- Support for authentication headers
|
||||
|
||||
### S3-Compatible Provider
|
||||
**File**: `providers/s3.go` (FUTURE)
|
||||
|
||||
Serve static files from S3, MinIO, or S3-compatible storage:
|
||||
|
||||
```go
|
||||
type S3FSProvider struct {
|
||||
bucket string
|
||||
prefix string
|
||||
region string
|
||||
client *s3.Client
|
||||
cache LocalCache
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// Example usage
|
||||
service.Mount(MountConfig{
|
||||
URLPrefix: "/media",
|
||||
Provider: S3Provider("my-bucket", "static/", "us-east-1"),
|
||||
})
|
||||
```
|
||||
|
||||
**Features**:
|
||||
- List and fetch objects from S3 buckets
|
||||
- Support for AWS S3, MinIO, DigitalOcean Spaces, etc.
|
||||
- IAM role or credential-based authentication
|
||||
- Optional local caching layer
|
||||
- Efficient metadata retrieval
|
||||
- Support for presigned URLs
|
||||
|
||||
### Other Future Providers
|
||||
- **GitProvider**: Serve files from Git repositories
|
||||
- **MemoryProvider**: In-memory filesystem for testing/temporary files
|
||||
- **ProxyProvider**: Proxy to another static file server
|
||||
- **CompositeProvider**: Fallback chain across multiple providers
|
||||
|
||||
### Advanced Cache Policies (FUTURE)
|
||||
- **TimedCachePolicy**: Different cache times by time of day
|
||||
- **UserAgentCachePolicy**: Cache based on client type
|
||||
- **ConditionalCachePolicy**: Smart cache based on file size/type
|
||||
- **DistributedCachePolicy**: Shared cache across service instances
|
||||
|
||||
### Advanced Fallback Strategies (FUTURE)
|
||||
- **RegexFallbackStrategy**: Pattern-based routing
|
||||
- **I18nFallbackStrategy**: Language-based file resolution
|
||||
- **VersionedFallbackStrategy**: A/B testing support
|
||||
|
||||
## Critical Files Summary
|
||||
|
||||
### Files to CREATE (in order):
|
||||
1. `pkg/server/staticweb/interfaces.go` - Core contracts
|
||||
2. `pkg/server/staticweb/config.go` - Configuration structs and helpers
|
||||
3. `pkg/server/staticweb/policies/cache.go` - Cache policy implementations
|
||||
4. `pkg/server/staticweb/policies/mime.go` - MIME resolver implementations
|
||||
5. `pkg/server/staticweb/policies/fallback.go` - Fallback strategy implementations
|
||||
6. `pkg/server/staticweb/providers/local.go` - Local directory provider
|
||||
7. `pkg/server/staticweb/providers/zip.go` - Zip file provider
|
||||
8. `pkg/server/staticweb/providers/embed.go` - Embedded filesystem provider
|
||||
9. `pkg/server/staticweb/mount.go` - Mount point implementation
|
||||
10. `pkg/server/staticweb/service.go` - Main service implementation
|
||||
11. `pkg/server/staticweb/testing/mocks.go` - Test helpers
|
||||
12. `pkg/server/staticweb/service_test.go` - Service tests
|
||||
13. `pkg/server/staticweb/providers/providers_test.go` - Provider tests
|
||||
14. `pkg/server/staticweb/example_test.go` - Example code
|
||||
15. `pkg/server/staticweb/README.md` - Documentation
|
||||
|
||||
### Files to REMOVE:
|
||||
1. `pkg/server/staticweb/staticweb.go` - Replaced by new design
|
||||
|
||||
### Files to KEEP:
|
||||
1. `pkg/server/zipfs/zipfs.go` - Used by ZipFSProvider
|
||||
|
||||
### Files for FUTURE (not in this refactoring):
|
||||
1. `pkg/server/staticweb/providers/http.go` - HTTP/HTTPS remote provider
|
||||
2. `pkg/server/staticweb/providers/s3.go` - S3-compatible storage provider
|
||||
3. `pkg/server/staticweb/providers/composite.go` - Fallback chain provider
|
||||
|
||||
## Example Usage After Refactoring
|
||||
|
||||
### Basic Static Site
|
||||
```go
|
||||
service := staticweb.NewService(nil) // Use defaults
|
||||
|
||||
err := service.Mount(staticweb.MountConfig{
|
||||
URLPrefix: "/static",
|
||||
Provider: staticweb.LocalProvider("./public"),
|
||||
})
|
||||
|
||||
muxRouter.PathPrefix("/").Handler(service.Handler())
|
||||
```
|
||||
|
||||
### SPA with API Routes
|
||||
```go
|
||||
service := staticweb.NewService(nil)
|
||||
|
||||
service.Mount(staticweb.MountConfig{
|
||||
URLPrefix: "/",
|
||||
Provider: staticweb.LocalProvider("./dist"),
|
||||
FallbackStrategy: staticweb.HTMLFallback("index.html"),
|
||||
})
|
||||
|
||||
// API routes take precedence (registered first)
|
||||
muxRouter.HandleFunc("/api/users", usersHandler)
|
||||
muxRouter.HandleFunc("/api/posts", postsHandler)
|
||||
|
||||
// Static files handle all other routes
|
||||
muxRouter.PathPrefix("/").Handler(service.Handler())
|
||||
```
|
||||
|
||||
### Multiple Mount Points with Different Policies
|
||||
```go
|
||||
service := staticweb.NewService(&staticweb.ServiceConfig{
|
||||
DefaultCacheTime: 3600,
|
||||
})
|
||||
|
||||
// Assets with long cache
|
||||
service.Mount(staticweb.MountConfig{
|
||||
URLPrefix: "/assets",
|
||||
Provider: staticweb.LocalProvider("./assets"),
|
||||
CachePolicy: staticweb.SimpleCache(604800), // 1 week
|
||||
})
|
||||
|
||||
// HTML with short cache
|
||||
service.Mount(staticweb.MountConfig{
|
||||
URLPrefix: "/",
|
||||
Provider: staticweb.LocalProvider("./public"),
|
||||
CachePolicy: staticweb.SimpleCache(300), // 5 minutes
|
||||
})
|
||||
|
||||
router.PathPrefix("/").Handler(service.Handler())
|
||||
```
|
||||
|
||||
### Embedded Files from Zip
|
||||
```go
|
||||
//go:embed assets.zip
|
||||
var assetsZip embed.FS
|
||||
|
||||
service := staticweb.NewService(nil)
|
||||
|
||||
service.Mount(staticweb.MountConfig{
|
||||
URLPrefix: "/static",
|
||||
Provider: staticweb.EmbedProvider(&assetsZip, "assets.zip"),
|
||||
})
|
||||
|
||||
router.PathPrefix("/").Handler(service.Handler())
|
||||
```
|
||||
|
||||
### Future: CDN Fallback (when HTTP provider is implemented)
|
||||
```go
|
||||
// Primary CDN with local fallback
|
||||
service.Mount(staticweb.MountConfig{
|
||||
URLPrefix: "/static",
|
||||
Provider: staticweb.CompositeProvider(
|
||||
staticweb.HTTPProvider("https://cdn.example.com/assets"),
|
||||
staticweb.LocalProvider("./public/assets"),
|
||||
),
|
||||
})
|
||||
```
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
- Each provider implementation independently
|
||||
- Each policy implementation independently
|
||||
- Mount point request handling
|
||||
- Service mount/unmount operations
|
||||
|
||||
### Integration Tests
|
||||
- Full request flow through service
|
||||
- Multiple mount points
|
||||
- Longest-prefix matching
|
||||
- Resource cleanup
|
||||
|
||||
### Example Tests
|
||||
- Executable examples in `example_test.go`
|
||||
- Demonstrate common usage patterns
|
||||
|
||||
## Migration Impact
|
||||
|
||||
### Breaking Changes
|
||||
- Complete API redesign (acceptable per user)
|
||||
- Package not currently used in codebase (no migration needed)
|
||||
- New consumers will use new API from start
|
||||
|
||||
### Future Extensibility
|
||||
The interface-driven design allows future additions without breaking changes:
|
||||
- Add HTTPFSProvider by implementing `FileSystemProvider`
|
||||
- Add S3FSProvider by implementing `FileSystemProvider`
|
||||
- Add custom cache policies by implementing `CachePolicy`
|
||||
- Add custom fallback strategies by implementing `FallbackStrategy`
|
||||
|
||||
## Implementation Order
|
||||
1. Interfaces (foundation)
|
||||
2. Configuration (API surface)
|
||||
3. Policies (pluggable behavior)
|
||||
4. Providers (filesystem abstraction)
|
||||
5. Mount Point (request handling)
|
||||
6. Service (orchestration)
|
||||
7. Tests (validation)
|
||||
8. Documentation (usage)
|
||||
9. Remove old code (cleanup)
|
||||
|
||||
This order ensures each layer builds on tested, working components.
|
||||
|
||||
---
|
||||
|
||||
103
pkg/server/staticweb/policies/cache.go
Normal file
103
pkg/server/staticweb/policies/cache.go
Normal file
@@ -0,0 +1,103 @@
|
||||
package policies
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// SimpleCachePolicy implements a basic cache policy with a single TTL for all files.
type SimpleCachePolicy struct {
	cacheTime int // cache duration in seconds; <= 0 disables caching
}

// NewSimpleCachePolicy creates a new SimpleCachePolicy with the given cache time in seconds.
func NewSimpleCachePolicy(cacheTimeSeconds int) *SimpleCachePolicy {
	return &SimpleCachePolicy{cacheTime: cacheTimeSeconds}
}

// GetCacheTime returns the cache duration for any file.
func (p *SimpleCachePolicy) GetCacheTime(filePath string) int {
	return p.cacheTime
}

// GetCacheHeaders returns the cache headers for the given file: a public
// max-age directive when caching is enabled, otherwise no-store headers
// that also cover HTTP/1.0 clients (Pragma, Expires).
func (p *SimpleCachePolicy) GetCacheHeaders(filePath string) map[string]string {
	if p.cacheTime > 0 {
		return map[string]string{
			"Cache-Control": fmt.Sprintf("public, max-age=%d", p.cacheTime),
		}
	}
	return map[string]string{
		"Cache-Control": "no-cache, no-store, must-revalidate",
		"Pragma":        "no-cache",
		"Expires":       "0",
	}
}
|
||||
|
||||
// ExtensionBasedCachePolicy implements a cache policy that varies by file extension.
type ExtensionBasedCachePolicy struct {
	rules       map[string]int // lowercase extension (with dot) -> cache time in seconds
	defaultTime int            // cache time for extensions without a rule
}

// NewExtensionBasedCachePolicy creates a new ExtensionBasedCachePolicy.
// rules maps file extensions (with leading dot, e.g., ".js") to cache times in seconds.
// defaultTime is used for files that don't match any rule.
func NewExtensionBasedCachePolicy(rules map[string]int, defaultTime int) *ExtensionBasedCachePolicy {
	return &ExtensionBasedCachePolicy{rules: rules, defaultTime: defaultTime}
}

// GetCacheTime returns the cache duration based on the file extension,
// falling back to the default when no rule matches.
func (p *ExtensionBasedCachePolicy) GetCacheTime(filePath string) int {
	if seconds, ok := p.rules[strings.ToLower(path.Ext(filePath))]; ok {
		return seconds
	}
	return p.defaultTime
}

// GetCacheHeaders returns cache headers based on the file extension:
// a public max-age directive for positive cache times, otherwise
// no-store headers that also cover HTTP/1.0 clients.
func (p *ExtensionBasedCachePolicy) GetCacheHeaders(filePath string) map[string]string {
	seconds := p.GetCacheTime(filePath)
	if seconds > 0 {
		return map[string]string{
			"Cache-Control": fmt.Sprintf("public, max-age=%d", seconds),
		}
	}
	return map[string]string{
		"Cache-Control": "no-cache, no-store, must-revalidate",
		"Pragma":        "no-cache",
		"Expires":       "0",
	}
}
|
||||
|
||||
// NoCachePolicy implements a cache policy that disables all caching.
type NoCachePolicy struct{}

// NewNoCachePolicy creates a new NoCachePolicy.
func NewNoCachePolicy() *NoCachePolicy {
	return new(NoCachePolicy)
}

// GetCacheTime always returns 0 (no caching).
func (p *NoCachePolicy) GetCacheTime(filePath string) int {
	return 0
}

// GetCacheHeaders returns headers that disable caching for both
// HTTP/1.1 (Cache-Control) and HTTP/1.0 (Pragma, Expires) clients.
func (p *NoCachePolicy) GetCacheHeaders(filePath string) map[string]string {
	headers := make(map[string]string, 3)
	headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
	headers["Pragma"] = "no-cache"
	headers["Expires"] = "0"
	return headers
}
|
||||
159
pkg/server/staticweb/policies/fallback.go
Normal file
159
pkg/server/staticweb/policies/fallback.go
Normal file
@@ -0,0 +1,159 @@
|
||||
package policies
|
||||
|
||||
import (
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NoFallback implements a fallback strategy that never falls back.
// All requests for missing files will result in 404 responses.
type NoFallback struct{}

// NewNoFallback creates a new NoFallback strategy.
func NewNoFallback() *NoFallback {
	return new(NoFallback)
}

// ShouldFallback always reports false: missing files stay missing.
func (f *NoFallback) ShouldFallback(filePath string) bool {
	return false
}

// GetFallbackPath returns an empty string; it is never consulted because
// ShouldFallback always reports false.
func (f *NoFallback) GetFallbackPath(filePath string) string {
	return ""
}
|
||||
|
||||
// HTMLFallbackStrategy implements a fallback strategy for Single Page Applications (SPAs).
// It serves a specified HTML file (typically index.html) for non-file requests.
type HTMLFallbackStrategy struct {
	indexFile string // document served for every non-asset request
}

// NewHTMLFallbackStrategy creates a new HTMLFallbackStrategy.
// indexFile is the path to the HTML file to serve (e.g., "index.html", "/index.html").
func NewHTMLFallbackStrategy(indexFile string) *HTMLFallbackStrategy {
	return &HTMLFallbackStrategy{indexFile: indexFile}
}

// ShouldFallback reports whether the request should be redirected to the
// index file: true for extension-less paths (SPA routes), false for paths
// that look like static assets.
func (f *HTMLFallbackStrategy) ShouldFallback(filePath string) bool {
	if f.isStaticAsset(filePath) {
		return false
	}
	return true
}

// GetFallbackPath returns the configured index file regardless of the
// requested path.
func (f *HTMLFallbackStrategy) GetFallbackPath(filePath string) string {
	return f.indexFile
}

// isStaticAsset treats any path with a file extension as a static asset.
func (f *HTMLFallbackStrategy) isStaticAsset(filePath string) bool {
	return path.Ext(filePath) != ""
}
|
||||
|
||||
// ExtensionBasedFallback implements a fallback strategy that skips fallback for known static file extensions.
// This is the behavior from the original StaticHTMLFallbackHandler.
type ExtensionBasedFallback struct {
	staticExtensions map[string]bool // lowercase extensions (with dot) that must not fall back
	fallbackPath     string          // file served when fallback triggers
}

// NewExtensionBasedFallback creates a new ExtensionBasedFallback strategy.
// staticExtensions is a list of file extensions (with leading dot) that should NOT use fallback;
// a missing leading dot is added and matching is case-insensitive.
// fallbackPath is the file to serve when fallback is triggered.
func NewExtensionBasedFallback(staticExtensions []string, fallbackPath string) *ExtensionBasedFallback {
	known := make(map[string]bool, len(staticExtensions))
	for _, ext := range staticExtensions {
		if !strings.HasPrefix(ext, ".") {
			ext = "." + ext
		}
		known[strings.ToLower(ext)] = true
	}
	return &ExtensionBasedFallback{
		staticExtensions: known,
		fallbackPath:     fallbackPath,
	}
}

// NewDefaultExtensionBasedFallback creates an ExtensionBasedFallback with common web asset extensions.
// This matches the behavior of the original StaticHTMLFallbackHandler.
func NewDefaultExtensionBasedFallback(fallbackPath string) *ExtensionBasedFallback {
	defaults := []string{
		".js", ".css", ".png", ".svg", ".ico", ".json",
		".jpg", ".jpeg", ".gif", ".woff", ".woff2", ".ttf", ".eot",
	}
	return NewExtensionBasedFallback(defaults, fallbackPath)
}

// ShouldFallback reports true unless the file path carries a registered
// static asset extension.
func (f *ExtensionBasedFallback) ShouldFallback(filePath string) bool {
	return !f.staticExtensions[strings.ToLower(path.Ext(filePath))]
}

// GetFallbackPath returns the configured fallback path regardless of the
// requested path.
func (f *ExtensionBasedFallback) GetFallbackPath(filePath string) string {
	return f.fallbackPath
}
|
||||
|
||||
// HTMLExtensionFallback is a fallback strategy that appends .html to paths,
// so that missing files are retried as {path}.html. Known static assets and
// paths already ending in .html/.htm are never redirected.
type HTMLExtensionFallback struct {
	staticExtensions map[string]bool // lowercase extensions that never fall back
}

// NewHTMLExtensionFallback creates a new HTMLExtensionFallback strategy.
// A missing leading dot on an extension is added automatically; matching is
// case-insensitive.
func NewHTMLExtensionFallback(staticExtensions []string) *HTMLExtensionFallback {
	known := make(map[string]bool, len(staticExtensions))
	for _, e := range staticExtensions {
		if !strings.HasPrefix(e, ".") {
			e = "." + e
		}
		known[strings.ToLower(e)] = true
	}

	return &HTMLExtensionFallback{staticExtensions: known}
}

// ShouldFallback returns true if the path doesn't have a static extension or .html.
func (f *HTMLExtensionFallback) ShouldFallback(filePath string) bool {
	switch ext := strings.ToLower(path.Ext(filePath)); {
	case f.staticExtensions[ext]:
		// Known static asset: never rewrite.
		return false
	case ext == ".html" || ext == ".htm":
		// Already an HTML document: nothing to append.
		return false
	default:
		return true
	}
}

// GetFallbackPath returns the path with .html appended.
func (f *HTMLExtensionFallback) GetFallbackPath(filePath string) string {
	p := path.Clean(filePath)
	// NOTE: kept from the original implementation — when the raw input does
	// not end in "/", any trailing slashes left by Clean (only possible for
	// the root path) are stripped before appending the extension.
	if !strings.HasSuffix(filePath, "/") {
		p = strings.TrimRight(p, "/")
	}

	if strings.HasSuffix(strings.ToLower(p), ".html") {
		return p
	}
	return p + ".html"
}
|
||||
245
pkg/server/staticweb/policies/mime.go
Normal file
245
pkg/server/staticweb/policies/mime.go
Normal file
@@ -0,0 +1,245 @@
|
||||
package policies
|
||||
|
||||
import (
|
||||
"mime"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// DefaultMIMEResolver implements a MIME type resolver using Go's standard mime package
// and a set of common web file type mappings. It is safe for concurrent use.
type DefaultMIMEResolver struct {
	customTypes map[string]string // lowercase dotted extension -> MIME type
	mu          sync.RWMutex
}

// NewDefaultMIMEResolver creates a new DefaultMIMEResolver with common web MIME types.
func NewDefaultMIMEResolver() *DefaultMIMEResolver {
	resolver := &DefaultMIMEResolver{
		customTypes: make(map[string]string),
	}

	// JavaScript & TypeScript
	resolver.RegisterMIMEType(".js", "application/javascript")
	resolver.RegisterMIMEType(".mjs", "application/javascript")
	resolver.RegisterMIMEType(".cjs", "application/javascript")
	resolver.RegisterMIMEType(".ts", "text/typescript")
	resolver.RegisterMIMEType(".tsx", "text/tsx")
	resolver.RegisterMIMEType(".jsx", "text/jsx")

	// CSS & Styling
	resolver.RegisterMIMEType(".css", "text/css")
	resolver.RegisterMIMEType(".scss", "text/x-scss")
	resolver.RegisterMIMEType(".sass", "text/x-sass")
	resolver.RegisterMIMEType(".less", "text/x-less")

	// HTML & XML
	resolver.RegisterMIMEType(".html", "text/html")
	resolver.RegisterMIMEType(".htm", "text/html")
	resolver.RegisterMIMEType(".xml", "application/xml")
	resolver.RegisterMIMEType(".xhtml", "application/xhtml+xml")

	// Images - Raster
	resolver.RegisterMIMEType(".png", "image/png")
	resolver.RegisterMIMEType(".jpg", "image/jpeg")
	resolver.RegisterMIMEType(".jpeg", "image/jpeg")
	resolver.RegisterMIMEType(".gif", "image/gif")
	resolver.RegisterMIMEType(".webp", "image/webp")
	resolver.RegisterMIMEType(".avif", "image/avif")
	resolver.RegisterMIMEType(".bmp", "image/bmp")
	resolver.RegisterMIMEType(".tiff", "image/tiff")
	resolver.RegisterMIMEType(".tif", "image/tiff")
	resolver.RegisterMIMEType(".ico", "image/x-icon")
	resolver.RegisterMIMEType(".cur", "image/x-icon")

	// Images - Vector
	resolver.RegisterMIMEType(".svg", "image/svg+xml")
	resolver.RegisterMIMEType(".svgz", "image/svg+xml")

	// Fonts
	resolver.RegisterMIMEType(".woff", "font/woff")
	resolver.RegisterMIMEType(".woff2", "font/woff2")
	resolver.RegisterMIMEType(".ttf", "font/ttf")
	resolver.RegisterMIMEType(".otf", "font/otf")
	resolver.RegisterMIMEType(".eot", "application/vnd.ms-fontobject")

	// Audio
	resolver.RegisterMIMEType(".mp3", "audio/mpeg")
	resolver.RegisterMIMEType(".wav", "audio/wav")
	resolver.RegisterMIMEType(".ogg", "audio/ogg")
	resolver.RegisterMIMEType(".oga", "audio/ogg")
	resolver.RegisterMIMEType(".m4a", "audio/mp4")
	resolver.RegisterMIMEType(".aac", "audio/aac")
	resolver.RegisterMIMEType(".flac", "audio/flac")
	resolver.RegisterMIMEType(".opus", "audio/opus")
	resolver.RegisterMIMEType(".weba", "audio/webm")

	// Video
	resolver.RegisterMIMEType(".mp4", "video/mp4")
	resolver.RegisterMIMEType(".webm", "video/webm")
	resolver.RegisterMIMEType(".ogv", "video/ogg")
	resolver.RegisterMIMEType(".avi", "video/x-msvideo")
	resolver.RegisterMIMEType(".mpeg", "video/mpeg")
	resolver.RegisterMIMEType(".mpg", "video/mpeg")
	resolver.RegisterMIMEType(".mov", "video/quicktime")
	resolver.RegisterMIMEType(".wmv", "video/x-ms-wmv")
	resolver.RegisterMIMEType(".flv", "video/x-flv")
	resolver.RegisterMIMEType(".mkv", "video/x-matroska")
	resolver.RegisterMIMEType(".m4v", "video/mp4")

	// Data & Configuration
	resolver.RegisterMIMEType(".json", "application/json")
	resolver.RegisterMIMEType(".xml", "application/xml")
	resolver.RegisterMIMEType(".yml", "application/yaml")
	resolver.RegisterMIMEType(".yaml", "application/yaml")
	resolver.RegisterMIMEType(".toml", "application/toml")
	resolver.RegisterMIMEType(".ini", "text/plain")
	resolver.RegisterMIMEType(".conf", "text/plain")
	resolver.RegisterMIMEType(".config", "text/plain")

	// Documents
	resolver.RegisterMIMEType(".pdf", "application/pdf")
	resolver.RegisterMIMEType(".doc", "application/msword")
	resolver.RegisterMIMEType(".docx", "application/vnd.openxmlformats-officedocument.wordprocessingml.document")
	resolver.RegisterMIMEType(".xls", "application/vnd.ms-excel")
	resolver.RegisterMIMEType(".xlsx", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
	resolver.RegisterMIMEType(".ppt", "application/vnd.ms-powerpoint")
	resolver.RegisterMIMEType(".pptx", "application/vnd.openxmlformats-officedocument.presentationml.presentation")
	resolver.RegisterMIMEType(".odt", "application/vnd.oasis.opendocument.text")
	resolver.RegisterMIMEType(".ods", "application/vnd.oasis.opendocument.spreadsheet")
	resolver.RegisterMIMEType(".odp", "application/vnd.oasis.opendocument.presentation")

	// Archives
	resolver.RegisterMIMEType(".zip", "application/zip")
	resolver.RegisterMIMEType(".tar", "application/x-tar")
	resolver.RegisterMIMEType(".gz", "application/gzip")
	resolver.RegisterMIMEType(".bz2", "application/x-bzip2")
	resolver.RegisterMIMEType(".7z", "application/x-7z-compressed")
	resolver.RegisterMIMEType(".rar", "application/vnd.rar")

	// Text files
	resolver.RegisterMIMEType(".txt", "text/plain")
	resolver.RegisterMIMEType(".md", "text/markdown")
	resolver.RegisterMIMEType(".markdown", "text/markdown")
	resolver.RegisterMIMEType(".csv", "text/csv")
	resolver.RegisterMIMEType(".log", "text/plain")

	// Source code (for syntax highlighting in browsers)
	resolver.RegisterMIMEType(".c", "text/x-c")
	resolver.RegisterMIMEType(".cpp", "text/x-c++")
	resolver.RegisterMIMEType(".h", "text/x-c")
	resolver.RegisterMIMEType(".hpp", "text/x-c++")
	resolver.RegisterMIMEType(".go", "text/x-go")
	resolver.RegisterMIMEType(".py", "text/x-python")
	resolver.RegisterMIMEType(".java", "text/x-java")
	resolver.RegisterMIMEType(".rs", "text/x-rust")
	resolver.RegisterMIMEType(".rb", "text/x-ruby")
	resolver.RegisterMIMEType(".php", "text/x-php")
	resolver.RegisterMIMEType(".sh", "text/x-shellscript")
	resolver.RegisterMIMEType(".bash", "text/x-shellscript")
	resolver.RegisterMIMEType(".sql", "text/x-sql")
	// Compound extension: matched via the suffix scan in GetMIMEType.
	resolver.RegisterMIMEType(".template.sql", "text/plain")
	resolver.RegisterMIMEType(".upg", "text/plain")

	// Web Assembly
	resolver.RegisterMIMEType(".wasm", "application/wasm")

	// Manifest & Service Worker
	resolver.RegisterMIMEType(".webmanifest", "application/manifest+json")
	resolver.RegisterMIMEType(".manifest", "text/cache-manifest")

	// 3D Models
	resolver.RegisterMIMEType(".gltf", "model/gltf+json")
	resolver.RegisterMIMEType(".glb", "model/gltf-binary")
	resolver.RegisterMIMEType(".obj", "model/obj")
	resolver.RegisterMIMEType(".stl", "model/stl")

	// Other common web assets
	resolver.RegisterMIMEType(".map", "application/json") // Source maps
	resolver.RegisterMIMEType(".swf", "application/x-shockwave-flash")
	resolver.RegisterMIMEType(".apk", "application/vnd.android.package-archive")
	resolver.RegisterMIMEType(".dmg", "application/x-apple-diskimage")
	resolver.RegisterMIMEType(".exe", "application/x-msdownload")
	resolver.RegisterMIMEType(".iso", "application/x-iso9660-image")

	return resolver
}

// GetMIMEType returns the MIME type for the given file path.
// It first checks custom registered types — including compound extensions
// such as ".template.sql", which path.Ext alone would report as ".sql" —
// then falls back to Go's mime.TypeByExtension. An empty string is returned
// when the type is unknown.
func (r *DefaultMIMEResolver) GetMIMEType(filePath string) string {
	base := strings.ToLower(path.Base(filePath))
	ext := path.Ext(base)

	// Check custom types first. Dotted suffixes of the file name are tried
	// longest-first so compound extensions win over their final segment;
	// the last suffix tried is the plain extension itself.
	r.mu.RLock()
	for i := 0; i < len(base); i++ {
		if base[i] != '.' {
			continue
		}
		if mimeType, ok := r.customTypes[base[i:]]; ok {
			r.mu.RUnlock()
			return mimeType
		}
	}
	r.mu.RUnlock()

	// Fall back to the standard library's table for the simple extension.
	if mimeType := mime.TypeByExtension(ext); mimeType != "" {
		return mimeType
	}

	// Return empty string if unknown
	return ""
}

// RegisterMIMEType registers a custom MIME type for the given file extension.
// A missing leading dot is added automatically; extensions are stored lowercase.
func (r *DefaultMIMEResolver) RegisterMIMEType(extension, mimeType string) {
	if !strings.HasPrefix(extension, ".") {
		extension = "." + extension
	}

	r.mu.Lock()
	r.customTypes[strings.ToLower(extension)] = mimeType
	r.mu.Unlock()
}
|
||||
|
||||
// ConfigurableMIMEResolver implements a MIME type resolver with user-defined
// mappings only. It never consults default or standard-library tables.
// It is safe for concurrent use.
type ConfigurableMIMEResolver struct {
	types map[string]string // lowercase dotted extension -> MIME type
	mu    sync.RWMutex
}

// NewConfigurableMIMEResolver creates a new ConfigurableMIMEResolver with the given mappings.
func NewConfigurableMIMEResolver(types map[string]string) *ConfigurableMIMEResolver {
	r := &ConfigurableMIMEResolver{
		types: make(map[string]string, len(types)),
	}
	for ext, mt := range types {
		// Reuse RegisterMIMEType so dot-prefixing and lowercasing apply.
		r.RegisterMIMEType(ext, mt)
	}
	return r
}

// GetMIMEType returns the MIME type registered for the file's extension,
// or "" when none is registered.
func (r *ConfigurableMIMEResolver) GetMIMEType(filePath string) string {
	ext := strings.ToLower(path.Ext(filePath))

	r.mu.RLock()
	defer r.mu.RUnlock()
	// The map's zero value "" doubles as the "unknown" result.
	return r.types[ext]
}

// RegisterMIMEType registers a MIME type for the given file extension.
// A missing leading dot is added automatically; extensions are stored lowercase.
func (r *ConfigurableMIMEResolver) RegisterMIMEType(extension, mimeType string) {
	if !strings.HasPrefix(extension, ".") {
		extension = "." + extension
	}

	r.mu.Lock()
	defer r.mu.Unlock()
	r.types[strings.ToLower(extension)] = mimeType
}
|
||||
119
pkg/server/staticweb/providers/embed.go
Normal file
119
pkg/server/staticweb/providers/embed.go
Normal file
@@ -0,0 +1,119 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"embed"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"sync"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/server/zipfs"
|
||||
)
|
||||
|
||||
// EmbedFSProvider serves files from an embedded filesystem.
// It supports both direct embedded directories and embedded zip files.
type EmbedFSProvider struct {
	embedFS   *embed.FS   // set only when the caller passed a *embed.FS (kept for tracking)
	zipFile   string      // Optional: path within embedded FS to zip file
	zipReader *zip.Reader // parsed archive when zipFile is set; nil otherwise or after Close
	fs        fs.FS       // filesystem actually served from; nil after Close
	mu        sync.RWMutex
}
|
||||
|
||||
// NewEmbedFSProvider creates a new EmbedFSProvider.
// If zipFile is empty, the embedded FS is used directly.
// If zipFile is specified, it's treated as a path to a zip file within the
// embedded FS: the archive is read fully into memory and served via zipfs.
func NewEmbedFSProvider(embedFS fs.FS, zipFile string) (*EmbedFSProvider, error) {
	if embedFS == nil {
		return nil, fmt.Errorf("embedded filesystem cannot be nil")
	}

	// Try to cast to *embed.FS for tracking purposes (not required; any fs.FS works)
	var embedFSPtr *embed.FS
	if efs, ok := embedFS.(*embed.FS); ok {
		embedFSPtr = efs
	}

	provider := &EmbedFSProvider{
		embedFS: embedFSPtr,
		zipFile: zipFile,
	}

	// If zipFile is specified, open it as a zip archive
	if zipFile != "" {
		// Read the zip file from the embedded FS.
		// Prefer the ReadFile fast path when the FS supports it (embed.FS does).
		var data []byte
		var err error

		if readFileFS, ok := embedFS.(interface{ ReadFile(string) ([]byte, error) }); ok {
			data, err = readFileFS.ReadFile(zipFile)
		} else {
			// Fall back to Open and reading the stream ourselves.
			file, openErr := embedFS.Open(zipFile)
			if openErr != nil {
				return nil, fmt.Errorf("failed to open embedded zip file: %w", openErr)
			}
			defer file.Close()
			data, err = io.ReadAll(file)
		}
		if err != nil {
			return nil, fmt.Errorf("failed to read embedded zip file: %w", err)
		}

		// Create a zip reader over the in-memory archive bytes
		reader := bytes.NewReader(data)
		zipReader, err := zip.NewReader(reader, int64(len(data)))
		if err != nil {
			return nil, fmt.Errorf("failed to create zip reader: %w", err)
		}

		provider.zipReader = zipReader
		provider.fs = zipfs.NewZipFS(zipReader)
	} else {
		// Use the embedded FS directly
		provider.fs = embedFS
	}

	return provider, nil
}
|
||||
|
||||
// Open opens the named file from the embedded filesystem.
|
||||
func (p *EmbedFSProvider) Open(name string) (fs.File, error) {
|
||||
p.mu.RLock()
|
||||
defer p.mu.RUnlock()
|
||||
|
||||
if p.fs == nil {
|
||||
return nil, fmt.Errorf("embedded filesystem is closed")
|
||||
}
|
||||
|
||||
return p.fs.Open(name)
|
||||
}
|
||||
|
||||
// Close releases any resources held by the provider.
|
||||
// For embedded filesystems, this is mostly a no-op since Go manages the lifecycle.
|
||||
func (p *EmbedFSProvider) Close() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
// Clear references to allow garbage collection
|
||||
p.fs = nil
|
||||
p.zipReader = nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type returns "embed" or "embed-zip" depending on the configuration.
|
||||
func (p *EmbedFSProvider) Type() string {
|
||||
if p.zipFile != "" {
|
||||
return "embed-zip"
|
||||
}
|
||||
return "embed"
|
||||
}
|
||||
|
||||
// ZipFile returns the path to the zip file within the embedded FS, if any.
|
||||
func (p *EmbedFSProvider) ZipFile() string {
|
||||
return p.zipFile
|
||||
}
|
||||
80
pkg/server/staticweb/providers/local.go
Normal file
80
pkg/server/staticweb/providers/local.go
Normal file
@@ -0,0 +1,80 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// LocalFSProvider serves files from a local directory.
|
||||
type LocalFSProvider struct {
|
||||
path string
|
||||
fs fs.FS
|
||||
}
|
||||
|
||||
// NewLocalFSProvider creates a new LocalFSProvider for the given directory path.
|
||||
// The path must be an absolute path to an existing directory.
|
||||
func NewLocalFSProvider(path string) (*LocalFSProvider, error) {
|
||||
// Validate that the path exists and is a directory
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to stat directory: %w", err)
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
return nil, fmt.Errorf("path is not a directory: %s", path)
|
||||
}
|
||||
|
||||
// Convert to absolute path
|
||||
absPath, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get absolute path: %w", err)
|
||||
}
|
||||
|
||||
return &LocalFSProvider{
|
||||
path: absPath,
|
||||
fs: os.DirFS(absPath),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Open opens the named file from the local directory.
|
||||
func (p *LocalFSProvider) Open(name string) (fs.File, error) {
|
||||
return p.fs.Open(name)
|
||||
}
|
||||
|
||||
// Close releases any resources held by the provider.
|
||||
// For local filesystem, this is a no-op since os.DirFS doesn't hold resources.
|
||||
func (p *LocalFSProvider) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type returns "local".
|
||||
func (p *LocalFSProvider) Type() string {
|
||||
return "local"
|
||||
}
|
||||
|
||||
// Path returns the absolute path to the directory being served.
|
||||
func (p *LocalFSProvider) Path() string {
|
||||
return p.path
|
||||
}
|
||||
|
||||
// Reload refreshes the filesystem view.
|
||||
// For local directories, os.DirFS automatically picks up changes,
|
||||
// so this recreates the DirFS to ensure a fresh view.
|
||||
func (p *LocalFSProvider) Reload() error {
|
||||
// Verify the directory still exists
|
||||
info, err := os.Stat(p.path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stat directory: %w", err)
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
return fmt.Errorf("path is no longer a directory: %s", p.path)
|
||||
}
|
||||
|
||||
// Recreate the DirFS
|
||||
p.fs = os.DirFS(p.path)
|
||||
|
||||
return nil
|
||||
}
|
||||
393
pkg/server/staticweb/providers/providers_test.go
Normal file
393
pkg/server/staticweb/providers/providers_test.go
Normal file
@@ -0,0 +1,393 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestLocalFSProvider verifies that a LocalFSProvider can open and read a
// file from a real temporary directory and reports the "local" type.
func TestLocalFSProvider(t *testing.T) {
	// Create a temporary directory with test files
	tmpDir := t.TempDir()

	testFile := filepath.Join(tmpDir, "test.txt")
	if err := os.WriteFile(testFile, []byte("test content"), 0644); err != nil {
		t.Fatal(err)
	}

	provider, err := NewLocalFSProvider(tmpDir)
	if err != nil {
		t.Fatalf("Failed to create provider: %v", err)
	}
	defer provider.Close()

	// Test opening a file
	file, err := provider.Open("test.txt")
	if err != nil {
		t.Fatalf("Failed to open file: %v", err)
	}
	defer file.Close()

	// Read the file and check the content round-trips
	data, err := io.ReadAll(file)
	if err != nil {
		t.Fatalf("Failed to read file: %v", err)
	}

	if string(data) != "test content" {
		t.Errorf("Expected 'test content', got %q", string(data))
	}

	// Test type
	if provider.Type() != "local" {
		t.Errorf("Expected type 'local', got %q", provider.Type())
	}
}
|
||||
|
||||
// TestZipFSProvider builds a zip archive on disk, then verifies that a
// ZipFSProvider can open and read an entry from it and reports type "zip".
func TestZipFSProvider(t *testing.T) {
	// Create a temporary zip file
	tmpDir := t.TempDir()
	zipPath := filepath.Join(tmpDir, "test.zip")

	// Create zip file with test content
	zipFile, err := os.Create(zipPath)
	if err != nil {
		t.Fatal(err)
	}

	zipWriter := zip.NewWriter(zipFile)
	fileWriter, err := zipWriter.Create("test.txt")
	if err != nil {
		t.Fatal(err)
	}

	_, err = fileWriter.Write([]byte("zip content"))
	if err != nil {
		t.Fatal(err)
	}

	// Close the writer before the underlying file so the central directory is flushed
	if err := zipWriter.Close(); err != nil {
		t.Fatal(err)
	}

	if err := zipFile.Close(); err != nil {
		t.Fatal(err)
	}

	// Test the provider
	provider, err := NewZipFSProvider(zipPath)
	if err != nil {
		t.Fatalf("Failed to create provider: %v", err)
	}
	defer provider.Close()

	// Test opening a file
	file, err := provider.Open("test.txt")
	if err != nil {
		t.Fatalf("Failed to open file: %v", err)
	}
	defer file.Close()

	// Read the file
	data, err := io.ReadAll(file)
	if err != nil {
		t.Fatalf("Failed to read file: %v", err)
	}

	if string(data) != "zip content" {
		t.Errorf("Expected 'zip content', got %q", string(data))
	}

	// Test type
	if provider.Type() != "zip" {
		t.Errorf("Expected type 'zip', got %q", provider.Type())
	}
}
|
||||
|
||||
// TestZipFSProviderReload verifies that rewriting the zip file on disk and
// calling Reload makes the provider serve the updated archive contents.
func TestZipFSProviderReload(t *testing.T) {
	// Create a temporary zip file
	tmpDir := t.TempDir()
	zipPath := filepath.Join(tmpDir, "test.zip")

	// Helper to (re)create the zip with a single "test.txt" holding content
	createZip := func(content string) {
		zipFile, err := os.Create(zipPath)
		if err != nil {
			t.Fatal(err)
		}
		defer zipFile.Close()

		zipWriter := zip.NewWriter(zipFile)
		fileWriter, err := zipWriter.Create("test.txt")
		if err != nil {
			t.Fatal(err)
		}

		_, err = fileWriter.Write([]byte(content))
		if err != nil {
			t.Fatal(err)
		}

		if err := zipWriter.Close(); err != nil {
			t.Fatal(err)
		}
	}

	// Create initial zip
	createZip("original content")

	// Test the provider
	provider, err := NewZipFSProvider(zipPath)
	if err != nil {
		t.Fatalf("Failed to create provider: %v", err)
	}
	defer provider.Close()

	// Read initial content
	file, err := provider.Open("test.txt")
	if err != nil {
		t.Fatalf("Failed to open file: %v", err)
	}
	data, _ := io.ReadAll(file)
	file.Close()

	if string(data) != "original content" {
		t.Errorf("Expected 'original content', got %q", string(data))
	}

	// Update the zip file on disk
	createZip("updated content")

	// Reload the provider so it reopens the archive
	if err := provider.Reload(); err != nil {
		t.Fatalf("Failed to reload: %v", err)
	}

	// Read updated content
	file, err = provider.Open("test.txt")
	if err != nil {
		t.Fatalf("Failed to open file after reload: %v", err)
	}
	data, _ = io.ReadAll(file)
	file.Close()

	if string(data) != "updated content" {
		t.Errorf("Expected 'updated content', got %q", string(data))
	}
}
|
||||
|
||||
// TestLocalFSProviderReload verifies that changes to a file on disk are
// visible after calling Reload on a LocalFSProvider.
func TestLocalFSProviderReload(t *testing.T) {
	// Create a temporary directory with test files
	tmpDir := t.TempDir()

	testFile := filepath.Join(tmpDir, "test.txt")
	if err := os.WriteFile(testFile, []byte("original"), 0644); err != nil {
		t.Fatal(err)
	}

	provider, err := NewLocalFSProvider(tmpDir)
	if err != nil {
		t.Fatalf("Failed to create provider: %v", err)
	}
	defer provider.Close()

	// Read initial content
	file, err := provider.Open("test.txt")
	if err != nil {
		t.Fatalf("Failed to open file: %v", err)
	}
	data, _ := io.ReadAll(file)
	file.Close()

	if string(data) != "original" {
		t.Errorf("Expected 'original', got %q", string(data))
	}

	// Update the file on disk
	if err := os.WriteFile(testFile, []byte("updated"), 0644); err != nil {
		t.Fatal(err)
	}

	// Reload the provider (os.DirFS would see the change anyway; this
	// exercises the Reload path)
	if err := provider.Reload(); err != nil {
		t.Fatalf("Failed to reload: %v", err)
	}

	// Read updated content
	file, err = provider.Open("test.txt")
	if err != nil {
		t.Fatalf("Failed to open file after reload: %v", err)
	}
	data, _ = io.ReadAll(file)
	file.Close()

	if string(data) != "updated" {
		t.Errorf("Expected 'updated', got %q", string(data))
	}
}
|
||||
|
||||
// TestEmbedFSProvider verifies direct (non-zip) serving from an fs.FS using
// the mockEmbedFS test double, including the "embed" type string.
func TestEmbedFSProvider(t *testing.T) {
	// Test with a mock embed.FS
	mockFS := &mockEmbedFS{
		files: map[string][]byte{
			"test.txt": []byte("test content"),
		},
	}

	provider, err := NewEmbedFSProvider(mockFS, "")
	if err != nil {
		t.Fatalf("Failed to create provider: %v", err)
	}
	defer provider.Close()

	// Test type
	if provider.Type() != "embed" {
		t.Errorf("Expected type 'embed', got %q", provider.Type())
	}

	// Test opening a file
	file, err := provider.Open("test.txt")
	if err != nil {
		t.Fatalf("Failed to open file: %v", err)
	}
	defer file.Close()

	// Read the file
	data, err := io.ReadAll(file)
	if err != nil {
		t.Fatalf("Failed to read file: %v", err)
	}

	if string(data) != "test content" {
		t.Errorf("Expected 'test content', got %q", string(data))
	}
}
|
||||
|
||||
// TestEmbedFSProviderWithZip verifies the embedded-zip path: a real zip is
// built on disk, its bytes are exposed through mockEmbedFS, and the provider
// must serve the archive's entries and report type "embed-zip".
func TestEmbedFSProviderWithZip(t *testing.T) {
	// Create an embedded-like FS with a zip file
	// For simplicity, we'll use a mock embed.FS
	tmpDir := t.TempDir()
	zipPath := filepath.Join(tmpDir, "test.zip")

	// Create zip file
	zipFile, err := os.Create(zipPath)
	if err != nil {
		t.Fatal(err)
	}

	zipWriter := zip.NewWriter(zipFile)
	fileWriter, err := zipWriter.Create("test.txt")
	if err != nil {
		t.Fatal(err)
	}

	_, err = fileWriter.Write([]byte("embedded zip content"))
	if err != nil {
		t.Fatal(err)
	}

	// Writer must be closed before the file so the archive is complete
	zipWriter.Close()
	zipFile.Close()

	// Read the zip file back as raw bytes
	zipData, err := os.ReadFile(zipPath)
	if err != nil {
		t.Fatal(err)
	}

	// Create a mock embed.FS exposing the zip bytes
	mockFS := &mockEmbedFS{
		files: map[string][]byte{
			"test.zip": zipData,
		},
	}

	provider, err := NewEmbedFSProvider(mockFS, "test.zip")
	if err != nil {
		t.Fatalf("Failed to create provider: %v", err)
	}
	defer provider.Close()

	// Test opening a file from inside the embedded archive
	file, err := provider.Open("test.txt")
	if err != nil {
		t.Fatalf("Failed to open file: %v", err)
	}
	defer file.Close()

	// Read the file
	data, err := io.ReadAll(file)
	if err != nil {
		t.Fatalf("Failed to read file: %v", err)
	}

	if string(data) != "embedded zip content" {
		t.Errorf("Expected 'embedded zip content', got %q", string(data))
	}

	// Test type
	if provider.Type() != "embed-zip" {
		t.Errorf("Expected type 'embed-zip', got %q", provider.Type())
	}
}
|
||||
|
||||
// mockEmbedFS is a mock embed.FS for testing. It implements fs.FS plus the
// ReadFile method that NewEmbedFSProvider probes for.
type mockEmbedFS struct {
	files map[string][]byte // path -> file contents
}

// Open returns an in-memory fs.File for name, or os.ErrNotExist.
func (m *mockEmbedFS) Open(name string) (fs.File, error) {
	data, ok := m.files[name]
	if !ok {
		return nil, os.ErrNotExist
	}

	return &mockFile{
		name:   name,
		reader: bytes.NewReader(data),
		size:   int64(len(data)),
	}, nil
}

// ReadFile mirrors embed.FS.ReadFile for the fast-path branch in the provider.
func (m *mockEmbedFS) ReadFile(name string) ([]byte, error) {
	data, ok := m.files[name]
	if !ok {
		return nil, os.ErrNotExist
	}
	return data, nil
}

// mockFile is a minimal fs.File backed by a bytes.Reader.
type mockFile struct {
	name   string
	reader *bytes.Reader
	size   int64
}

// Stat returns synthetic file info for the mock file.
func (f *mockFile) Stat() (fs.FileInfo, error) {
	return &mockFileInfo{name: f.name, size: f.size}, nil
}

// Read delegates to the in-memory reader.
func (f *mockFile) Read(p []byte) (int, error) {
	return f.reader.Read(p)
}

// Close is a no-op for the in-memory file.
func (f *mockFile) Close() error {
	return nil
}

// mockFileInfo is a minimal fs.FileInfo for mockFile.
type mockFileInfo struct {
	name string
	size int64
}

func (fi *mockFileInfo) Name() string       { return fi.name }
func (fi *mockFileInfo) Size() int64        { return fi.size }
func (fi *mockFileInfo) Mode() fs.FileMode  { return 0644 }
func (fi *mockFileInfo) ModTime() time.Time { return time.Now() }
func (fi *mockFileInfo) IsDir() bool        { return false }
func (fi *mockFileInfo) Sys() interface{}   { return nil }
|
||||
102
pkg/server/staticweb/providers/zip.go
Normal file
102
pkg/server/staticweb/providers/zip.go
Normal file
@@ -0,0 +1,102 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/server/zipfs"
|
||||
)
|
||||
|
||||
// ZipFSProvider serves files from a zip file.
type ZipFSProvider struct {
	zipPath   string          // absolute path to the zip archive on disk
	zipReader *zip.ReadCloser // open archive handle; nil after Close
	zipFS     *zipfs.ZipFS    // fs.FS view over the archive; nil after Close
	mu        sync.RWMutex
}
|
||||
|
||||
// NewZipFSProvider creates a new ZipFSProvider for the given zip file path.
|
||||
func NewZipFSProvider(zipPath string) (*ZipFSProvider, error) {
|
||||
// Convert to absolute path
|
||||
absPath, err := filepath.Abs(zipPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get absolute path: %w", err)
|
||||
}
|
||||
|
||||
// Open the zip file
|
||||
zipReader, err := zip.OpenReader(absPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open zip file: %w", err)
|
||||
}
|
||||
|
||||
return &ZipFSProvider{
|
||||
zipPath: absPath,
|
||||
zipReader: zipReader,
|
||||
zipFS: zipfs.NewZipFS(&zipReader.Reader),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Open opens the named file from the zip archive.
|
||||
func (p *ZipFSProvider) Open(name string) (fs.File, error) {
|
||||
p.mu.RLock()
|
||||
defer p.mu.RUnlock()
|
||||
|
||||
if p.zipFS == nil {
|
||||
return nil, fmt.Errorf("zip filesystem is closed")
|
||||
}
|
||||
|
||||
return p.zipFS.Open(name)
|
||||
}
|
||||
|
||||
// Close releases resources held by the zip reader.
|
||||
func (p *ZipFSProvider) Close() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
if p.zipReader != nil {
|
||||
err := p.zipReader.Close()
|
||||
p.zipReader = nil
|
||||
p.zipFS = nil
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type returns "zip".
|
||||
func (p *ZipFSProvider) Type() string {
|
||||
return "zip"
|
||||
}
|
||||
|
||||
// Path returns the absolute path to the zip file being served.
|
||||
func (p *ZipFSProvider) Path() string {
|
||||
return p.zipPath
|
||||
}
|
||||
|
||||
// Reload reopens the zip file to pick up any changes.
|
||||
// This is useful in development when the zip file is updated.
|
||||
func (p *ZipFSProvider) Reload() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
// Close the existing zip reader if open
|
||||
if p.zipReader != nil {
|
||||
if err := p.zipReader.Close(); err != nil {
|
||||
return fmt.Errorf("failed to close old zip reader: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Reopen the zip file
|
||||
zipReader, err := zip.OpenReader(p.zipPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to reopen zip file: %w", err)
|
||||
}
|
||||
|
||||
p.zipReader = zipReader
|
||||
p.zipFS = zipfs.NewZipFS(&zipReader.Reader)
|
||||
|
||||
return nil
|
||||
}
|
||||
189
pkg/server/staticweb/service.go
Normal file
189
pkg/server/staticweb/service.go
Normal file
@@ -0,0 +1,189 @@
|
||||
package staticweb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// service implements the StaticFileService interface.
|
||||
type service struct {
|
||||
mounts map[string]*mountPoint // urlPrefix -> mount point
|
||||
config *ServiceConfig
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// NewService creates a new static file service with the given configuration.
|
||||
// If config is nil, default configuration is used.
|
||||
func NewService(config *ServiceConfig) StaticFileService {
|
||||
if config == nil {
|
||||
config = DefaultServiceConfig()
|
||||
}
|
||||
|
||||
return &service{
|
||||
mounts: make(map[string]*mountPoint),
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
// Mount adds a new mount point with the given configuration.
|
||||
func (s *service) Mount(config MountConfig) error {
|
||||
// Validate the config
|
||||
if err := s.validateMountConfig(config); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// Check if the prefix is already mounted
|
||||
if _, exists := s.mounts[config.URLPrefix]; exists {
|
||||
return fmt.Errorf("mount point already exists at %s", config.URLPrefix)
|
||||
}
|
||||
|
||||
// Create the mount point
|
||||
mp, err := newMountPoint(config, s.config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create mount point: %w", err)
|
||||
}
|
||||
|
||||
// Add to the map
|
||||
s.mounts[config.URLPrefix] = mp
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unmount removes the mount point at the given URL prefix.
|
||||
func (s *service) Unmount(urlPrefix string) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
mp, exists := s.mounts[urlPrefix]
|
||||
if !exists {
|
||||
return fmt.Errorf("no mount point exists at %s", urlPrefix)
|
||||
}
|
||||
|
||||
// Close the mount point to release resources
|
||||
if err := mp.Close(); err != nil {
|
||||
return fmt.Errorf("failed to close mount point: %w", err)
|
||||
}
|
||||
|
||||
// Remove from the map
|
||||
delete(s.mounts, urlPrefix)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListMounts returns a sorted list of all mounted URL prefixes.
|
||||
func (s *service) ListMounts() []string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
prefixes := make([]string, 0, len(s.mounts))
|
||||
for prefix := range s.mounts {
|
||||
prefixes = append(prefixes, prefix)
|
||||
}
|
||||
|
||||
sort.Strings(prefixes)
|
||||
return prefixes
|
||||
}
|
||||
|
||||
// Reload reinitializes all filesystem providers that support reloading.
|
||||
// This is useful when the underlying files have changed (e.g., zip file updated).
|
||||
// Providers that implement ReloadableProvider will be reloaded.
|
||||
func (s *service) Reload() error {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
var errors []error
|
||||
|
||||
// Reload all mount points that support it
|
||||
for prefix, mp := range s.mounts {
|
||||
if reloadable, ok := mp.provider.(ReloadableProvider); ok {
|
||||
if err := reloadable.Reload(); err != nil {
|
||||
errors = append(errors, fmt.Errorf("%s: %w", prefix, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Return combined errors if any
|
||||
if len(errors) > 0 {
|
||||
return fmt.Errorf("errors while reloading providers: %v", errors)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close releases all resources held by the service.
|
||||
func (s *service) Close() error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
var errors []error
|
||||
|
||||
// Close all mount points
|
||||
for prefix, mp := range s.mounts {
|
||||
if err := mp.Close(); err != nil {
|
||||
errors = append(errors, fmt.Errorf("%s: %w", prefix, err))
|
||||
}
|
||||
}
|
||||
|
||||
// Clear the map
|
||||
s.mounts = make(map[string]*mountPoint)
|
||||
|
||||
// Return combined errors if any
|
||||
if len(errors) > 0 {
|
||||
return fmt.Errorf("errors while closing mount points: %v", errors)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handler returns an http.Handler that serves static files from all mount points.
|
||||
func (s *service) Handler() http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
// Find the best matching mount point using longest-prefix matching
|
||||
var bestMatch *mountPoint
|
||||
var bestPrefix string
|
||||
|
||||
for prefix, mp := range s.mounts {
|
||||
if strings.HasPrefix(r.URL.Path, prefix) {
|
||||
if len(prefix) > len(bestPrefix) {
|
||||
bestMatch = mp
|
||||
bestPrefix = prefix
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If no mount point matches, return without writing a response
|
||||
// This allows other handlers (like API routes) to process the request
|
||||
if bestMatch == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Serve the file from the matched mount point
|
||||
bestMatch.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// validateMountConfig validates the mount configuration.
|
||||
func (s *service) validateMountConfig(config MountConfig) error {
|
||||
if config.URLPrefix == "" {
|
||||
return fmt.Errorf("URLPrefix cannot be empty")
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(config.URLPrefix, "/") {
|
||||
return fmt.Errorf("URLPrefix must start with /")
|
||||
}
|
||||
|
||||
if config.Provider == nil {
|
||||
return fmt.Errorf("provider cannot be nil")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
257
pkg/server/staticweb/service_test.go
Normal file
257
pkg/server/staticweb/service_test.go
Normal file
@@ -0,0 +1,257 @@
|
||||
package staticweb
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
staticwebtesting "github.com/bitechdev/ResolveSpec/pkg/server/staticweb/testing"
|
||||
)
|
||||
|
||||
func TestServiceMount(t *testing.T) {
|
||||
service := NewService(nil)
|
||||
|
||||
provider := staticwebtesting.NewMockProvider(map[string][]byte{
|
||||
"index.html": []byte("<html>test</html>"),
|
||||
})
|
||||
|
||||
err := service.Mount(MountConfig{
|
||||
URLPrefix: "/test",
|
||||
Provider: provider,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to mount: %v", err)
|
||||
}
|
||||
|
||||
mounts := service.ListMounts()
|
||||
if len(mounts) != 1 || mounts[0] != "/test" {
|
||||
t.Errorf("Expected [/test], got %v", mounts)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceUnmount(t *testing.T) {
|
||||
service := NewService(nil)
|
||||
|
||||
provider := staticwebtesting.NewMockProvider(map[string][]byte{
|
||||
"index.html": []byte("<html>test</html>"),
|
||||
})
|
||||
|
||||
service.Mount(MountConfig{
|
||||
URLPrefix: "/test",
|
||||
Provider: provider,
|
||||
})
|
||||
|
||||
err := service.Unmount("/test")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to unmount: %v", err)
|
||||
}
|
||||
|
||||
mounts := service.ListMounts()
|
||||
if len(mounts) != 0 {
|
||||
t.Errorf("Expected empty list, got %v", mounts)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceHandler(t *testing.T) {
|
||||
service := NewService(nil)
|
||||
|
||||
provider := staticwebtesting.NewMockProvider(map[string][]byte{
|
||||
"index.html": []byte("<html>test</html>"),
|
||||
"app.js": []byte("console.log('test')"),
|
||||
})
|
||||
|
||||
err := service.Mount(MountConfig{
|
||||
URLPrefix: "/static",
|
||||
Provider: provider,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to mount: %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
path string
|
||||
expectedStatus int
|
||||
expectedBody string
|
||||
}{
|
||||
{
|
||||
name: "serve index.html",
|
||||
path: "/static/index.html",
|
||||
expectedStatus: http.StatusOK,
|
||||
expectedBody: "<html>test</html>",
|
||||
},
|
||||
{
|
||||
name: "serve app.js",
|
||||
path: "/static/app.js",
|
||||
expectedStatus: http.StatusOK,
|
||||
expectedBody: "console.log('test')",
|
||||
},
|
||||
{
|
||||
name: "non-existent file",
|
||||
path: "/static/nonexistent.html",
|
||||
expectedStatus: http.StatusNotFound,
|
||||
expectedBody: "",
|
||||
},
|
||||
{
|
||||
name: "non-matching prefix returns nothing",
|
||||
path: "/api/test",
|
||||
expectedStatus: http.StatusOK, // Handler returns without writing
|
||||
expectedBody: "",
|
||||
},
|
||||
}
|
||||
|
||||
handler := service.Handler()
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
req := httptest.NewRequest("GET", tt.path, nil)
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
handler.ServeHTTP(rec, req)
|
||||
|
||||
// For non-matching prefix, handler doesn't write anything
|
||||
if tt.path == "/api/test" {
|
||||
if rec.Code != 200 || rec.Body.Len() != 0 {
|
||||
t.Errorf("Expected no response, got status %d with body length %d", rec.Code, rec.Body.Len())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if rec.Code != tt.expectedStatus {
|
||||
t.Errorf("Expected status %d, got %d", tt.expectedStatus, rec.Code)
|
||||
}
|
||||
|
||||
if tt.expectedBody != "" && rec.Body.String() != tt.expectedBody {
|
||||
t.Errorf("Expected body %q, got %q", tt.expectedBody, rec.Body.String())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceLongestPrefixMatching(t *testing.T) {
|
||||
service := NewService(nil)
|
||||
|
||||
// Mount at /
|
||||
provider1 := staticwebtesting.NewMockProvider(map[string][]byte{
|
||||
"index.html": []byte("root"),
|
||||
})
|
||||
|
||||
// Mount at /static
|
||||
provider2 := staticwebtesting.NewMockProvider(map[string][]byte{
|
||||
"index.html": []byte("static"),
|
||||
})
|
||||
|
||||
service.Mount(MountConfig{
|
||||
URLPrefix: "/",
|
||||
Provider: provider1,
|
||||
})
|
||||
|
||||
service.Mount(MountConfig{
|
||||
URLPrefix: "/static",
|
||||
Provider: provider2,
|
||||
})
|
||||
|
||||
handler := service.Handler()
|
||||
|
||||
tests := []struct {
|
||||
path string
|
||||
expectedBody string
|
||||
}{
|
||||
{"/index.html", "root"},
|
||||
{"/static/index.html", "static"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.path, func(t *testing.T) {
|
||||
req := httptest.NewRequest("GET", tt.path, nil)
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
handler.ServeHTTP(rec, req)
|
||||
|
||||
if rec.Code != http.StatusOK {
|
||||
t.Errorf("Expected status 200, got %d", rec.Code)
|
||||
}
|
||||
|
||||
if rec.Body.String() != tt.expectedBody {
|
||||
t.Errorf("Expected body %q, got %q", tt.expectedBody, rec.Body.String())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceClose(t *testing.T) {
|
||||
service := NewService(nil)
|
||||
|
||||
provider := staticwebtesting.NewMockProvider(map[string][]byte{
|
||||
"index.html": []byte("<html>test</html>"),
|
||||
})
|
||||
|
||||
service.Mount(MountConfig{
|
||||
URLPrefix: "/test",
|
||||
Provider: provider,
|
||||
})
|
||||
|
||||
err := service.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to close service: %v", err)
|
||||
}
|
||||
|
||||
mounts := service.ListMounts()
|
||||
if len(mounts) != 0 {
|
||||
t.Errorf("Expected empty list after close, got %v", mounts)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceReload(t *testing.T) {
|
||||
service := NewService(nil)
|
||||
|
||||
// Create a mock provider that supports reload
|
||||
provider := staticwebtesting.NewMockProvider(map[string][]byte{
|
||||
"index.html": []byte("original"),
|
||||
})
|
||||
|
||||
service.Mount(MountConfig{
|
||||
URLPrefix: "/test",
|
||||
Provider: provider,
|
||||
})
|
||||
|
||||
handler := service.Handler()
|
||||
|
||||
// Test initial content
|
||||
req := httptest.NewRequest("GET", "/test/index.html", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rec, req)
|
||||
|
||||
if rec.Code != http.StatusOK {
|
||||
t.Errorf("Expected status 200, got %d", rec.Code)
|
||||
}
|
||||
|
||||
if rec.Body.String() != "original" {
|
||||
t.Errorf("Expected body 'original', got %q", rec.Body.String())
|
||||
}
|
||||
|
||||
// Update the provider's content
|
||||
provider.AddFile("index.html", []byte("updated"))
|
||||
|
||||
// The content is already updated since we're using a mock
|
||||
// In a real scenario with zip files, you'd call Reload() here
|
||||
err := service.Reload()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to reload service: %v", err)
|
||||
}
|
||||
|
||||
// Test updated content
|
||||
req = httptest.NewRequest("GET", "/test/index.html", nil)
|
||||
rec = httptest.NewRecorder()
|
||||
handler.ServeHTTP(rec, req)
|
||||
|
||||
if rec.Code != http.StatusOK {
|
||||
t.Errorf("Expected status 200, got %d", rec.Code)
|
||||
}
|
||||
|
||||
if rec.Body.String() != "updated" {
|
||||
t.Errorf("Expected body 'updated', got %q", rec.Body.String())
|
||||
}
|
||||
}
|
||||
231
pkg/server/staticweb/testing/mocks.go
Normal file
231
pkg/server/staticweb/testing/mocks.go
Normal file
@@ -0,0 +1,231 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MockFileSystemProvider is an in-memory filesystem provider for testing.
|
||||
// MockFileSystemProvider is an in-memory filesystem provider for testing.
type MockFileSystemProvider struct {
	files  map[string][]byte // slash-separated path -> contents
	closed bool              // set by Close; Open fails afterwards
	mu     sync.RWMutex      // guards files and closed
}

// NewMockProvider creates a new in-memory provider with the given files.
// Keys should be slash-separated paths (e.g., "index.html", "assets/app.js").
// A nil map is accepted and treated as an empty filesystem, so AddFile
// can be called safely afterwards (writing to a stored nil map would
// otherwise panic).
func NewMockProvider(files map[string][]byte) *MockFileSystemProvider {
	if files == nil {
		files = make(map[string][]byte)
	}
	return &MockFileSystemProvider{
		files: files,
	}
}
|
||||
|
||||
// Open opens a file from the in-memory filesystem.
|
||||
func (m *MockFileSystemProvider) Open(name string) (fs.File, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
if m.closed {
|
||||
return nil, fmt.Errorf("provider is closed")
|
||||
}
|
||||
|
||||
// Remove leading slash if present
|
||||
name = strings.TrimPrefix(name, "/")
|
||||
|
||||
data, ok := m.files[name]
|
||||
if !ok {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
return &mockFile{
|
||||
name: path.Base(name),
|
||||
data: data,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close marks the provider as closed.
|
||||
func (m *MockFileSystemProvider) Close() error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
m.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type returns "mock".
|
||||
func (m *MockFileSystemProvider) Type() string {
|
||||
return "mock"
|
||||
}
|
||||
|
||||
// AddFile adds a file to the in-memory filesystem.
|
||||
func (m *MockFileSystemProvider) AddFile(name string, data []byte) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
name = strings.TrimPrefix(name, "/")
|
||||
m.files[name] = data
|
||||
}
|
||||
|
||||
// RemoveFile removes a file from the in-memory filesystem.
|
||||
func (m *MockFileSystemProvider) RemoveFile(name string) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
name = strings.TrimPrefix(name, "/")
|
||||
delete(m.files, name)
|
||||
}
|
||||
|
||||
// mockFile implements fs.File for in-memory files.
|
||||
type mockFile struct {
|
||||
name string
|
||||
data []byte
|
||||
reader *bytes.Reader
|
||||
offset int64
|
||||
}
|
||||
|
||||
func (f *mockFile) Stat() (fs.FileInfo, error) {
|
||||
return &mockFileInfo{
|
||||
name: f.name,
|
||||
size: int64(len(f.data)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *mockFile) Read(p []byte) (int, error) {
|
||||
if f.reader == nil {
|
||||
f.reader = bytes.NewReader(f.data)
|
||||
if f.offset > 0 {
|
||||
f.reader.Seek(f.offset, io.SeekStart)
|
||||
}
|
||||
}
|
||||
n, err := f.reader.Read(p)
|
||||
f.offset += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (f *mockFile) Seek(offset int64, whence int) (int64, error) {
|
||||
if f.reader == nil {
|
||||
f.reader = bytes.NewReader(f.data)
|
||||
}
|
||||
pos, err := f.reader.Seek(offset, whence)
|
||||
f.offset = pos
|
||||
return pos, err
|
||||
}
|
||||
|
||||
func (f *mockFile) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// mockFileInfo implements fs.FileInfo.
|
||||
// mockFileInfo implements fs.FileInfo for in-memory files: a regular
// file with mode 0644 and the current time as its modification time.
type mockFileInfo struct {
	name string // base name
	size int64  // content length in bytes
}

func (fi *mockFileInfo) Name() string       { return fi.name }
func (fi *mockFileInfo) Size() int64        { return fi.size }
func (fi *mockFileInfo) Mode() fs.FileMode  { return 0644 }
func (fi *mockFileInfo) ModTime() time.Time { return time.Now() }
func (fi *mockFileInfo) IsDir() bool        { return false }
func (fi *mockFileInfo) Sys() interface{}   { return nil }
|
||||
|
||||
// MockCachePolicy is a configurable cache policy for testing.
|
||||
// MockCachePolicy is a configurable cache policy for testing.
type MockCachePolicy struct {
	CacheTime int               // max-age seconds used for the default header
	Headers   map[string]string // explicit headers; override the default when non-empty
}

// NewMockCachePolicy creates a new mock cache policy with the given
// cache time and no explicit headers.
func NewMockCachePolicy(cacheTime int) *MockCachePolicy {
	return &MockCachePolicy{
		CacheTime: cacheTime,
		Headers:   make(map[string]string),
	}
}

// GetCacheTime returns the configured cache time.
func (p *MockCachePolicy) GetCacheTime(path string) int {
	return p.CacheTime
}

// GetCacheHeaders returns the configured headers, or a default
// Cache-Control header derived from CacheTime when none are set.
//
// Emptiness is tested with len rather than a nil comparison: the
// constructor allocates an empty map, which previously made the
// default branch unreachable, so GetCacheHeaders returned no headers
// at all after NewMockCachePolicy.
func (p *MockCachePolicy) GetCacheHeaders(path string) map[string]string {
	if len(p.Headers) > 0 {
		return p.Headers
	}
	return map[string]string{
		"Cache-Control": fmt.Sprintf("public, max-age=%d", p.CacheTime),
	}
}
|
||||
|
||||
// MockMIMEResolver is a configurable MIME resolver for testing.
|
||||
// MockMIMEResolver is a configurable MIME resolver for testing.
type MockMIMEResolver struct {
	types map[string]string // lowercase ".ext" -> MIME type
	mu    sync.RWMutex      // guards types
}

// NewMockMIMEResolver creates a new mock MIME resolver with no
// registered types.
func NewMockMIMEResolver() *MockMIMEResolver {
	return &MockMIMEResolver{
		types: make(map[string]string),
	}
}

// GetMIMEType returns the MIME type registered for the extension of
// the given path, or "application/octet-stream" when the extension is
// unknown or the path has no extension.
//
// The original sliced path[strings.LastIndex(path, "."):], which
// panics (negative slice index) for any path without a dot.
func (r *MockMIMEResolver) GetMIMEType(path string) string {
	r.mu.RLock()
	defer r.mu.RUnlock()

	dot := strings.LastIndex(path, ".")
	if dot < 0 {
		return "application/octet-stream"
	}
	if mimeType, ok := r.types[strings.ToLower(path[dot:])]; ok {
		return mimeType
	}
	return "application/octet-stream"
}

// RegisterMIMEType registers a MIME type for an extension. A leading
// dot is added if missing and the extension is lowercased.
func (r *MockMIMEResolver) RegisterMIMEType(extension, mimeType string) {
	r.mu.Lock()
	defer r.mu.Unlock()

	if !strings.HasPrefix(extension, ".") {
		extension = "." + extension
	}
	r.types[strings.ToLower(extension)] = mimeType
}
|
||||
|
||||
// MockFallbackStrategy is a configurable fallback strategy for testing.
|
||||
// MockFallbackStrategy is a configurable fallback strategy for testing.
// Both behaviors are injectable; nil functions fall back to safe
// defaults (no fallback / "index.html").
type MockFallbackStrategy struct {
	ShouldFallbackFunc func(path string) bool   // decides whether to fall back
	FallbackPathFunc   func(path string) string // computes the fallback target
}

// NewMockFallbackStrategy creates a new mock fallback strategy from
// the two injectable behaviors (either may be nil).
func NewMockFallbackStrategy(shouldFallback func(string) bool, fallbackPath func(string) string) *MockFallbackStrategy {
	return &MockFallbackStrategy{
		ShouldFallbackFunc: shouldFallback,
		FallbackPathFunc:   fallbackPath,
	}
}

// ShouldFallback returns whether fallback should be used; false when
// no function was configured.
func (s *MockFallbackStrategy) ShouldFallback(path string) bool {
	if s.ShouldFallbackFunc == nil {
		return false
	}
	return s.ShouldFallbackFunc(path)
}

// GetFallbackPath returns the fallback path; "index.html" when no
// function was configured.
func (s *MockFallbackStrategy) GetFallbackPath(path string) string {
	if s.FallbackPathFunc == nil {
		return "index.html"
	}
	return s.FallbackPathFunc(path)
}
|
||||
112
pkg/server/zipfs/zipfs.go
Normal file
112
pkg/server/zipfs/zipfs.go
Normal file
@@ -0,0 +1,112 @@
|
||||
package zipfs
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
)
|
||||
|
||||
type ZipFS struct {
|
||||
*zip.Reader
|
||||
}
|
||||
|
||||
func NewZipFS(r *zip.Reader) *ZipFS {
|
||||
return &ZipFS{r}
|
||||
}
|
||||
|
||||
func (z *ZipFS) Open(name string) (fs.File, error) {
|
||||
for _, f := range z.File {
|
||||
if f.Name == name {
|
||||
rc, err := f.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ZipFile{f, rc, 0}, nil
|
||||
}
|
||||
}
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
// ZipFile is an fs.File (plus io.Seeker) view over a single zip
// archive entry. Zip streams are not random-access, so Seek only
// records a logical position; the next Read reopens the entry and
// discards bytes up to that position.
type ZipFile struct {
	*zip.File               // entry metadata; embedded for Name, FileInfo, Open
	rc     io.ReadCloser    // open decompressed stream; nil when not reading
	offset int64            // logical read position within the entry
}

// Stat returns the entry's FileInfo, or an error when no entry is attached.
func (f *ZipFile) Stat() (fs.FileInfo, error) {
	if f.File == nil {
		return nil, fmt.Errorf("no file")
	}
	return f.FileInfo(), nil
}

// Close closes the decompressed stream if one is open.
func (f *ZipFile) Close() error {
	if f.rc == nil {
		return nil
	}
	return f.rc.Close()
}

// Read reads decompressed data from the current logical offset.
//
// If no stream is open (first read, or after Seek/EOF closed it), the
// entry is reopened and f.offset bytes are discarded so the read
// continues at the recorded position. The original reopened at the
// start of the entry while leaving offset untouched, so a Read after
// Seek returned data from the wrong position.
func (f *ZipFile) Read(b []byte) (int, error) {
	if f.rc == nil {
		rc, err := f.Open()
		if err != nil {
			return 0, err
		}
		if f.offset > 0 {
			// Skip forward to the logical position; EOF here means the
			// offset is at or past the end of the entry.
			if _, err := io.CopyN(io.Discard, rc, f.offset); err != nil {
				rc.Close()
				return 0, err
			}
		}
		f.rc = rc
	}
	n, err := f.rc.Read(b)
	f.offset += int64(n)
	if err == io.EOF {
		f.rc.Close()
		f.rc = nil
	}
	return n, err
}

// Seek sets the logical read position. Any open stream is closed; the
// next Read reopens the entry and skips to the new position. An
// invalid whence now returns an error instead of silently succeeding.
func (f *ZipFile) Seek(offset int64, whence int) (int64, error) {
	if f.rc != nil {
		f.rc.Close()
		f.rc = nil
	}

	var abs int64
	switch whence {
	case io.SeekStart:
		abs = offset
	case io.SeekCurrent:
		abs = f.offset + offset
	case io.SeekEnd:
		abs = int64(f.UncompressedSize64) + offset
	default:
		return 0, &fs.PathError{Op: "seek", Path: f.Name, Err: fmt.Errorf("invalid whence")}
	}
	if abs < 0 {
		return 0, &fs.PathError{Op: "seek", Path: f.Name, Err: fmt.Errorf("negative position")}
	}
	f.offset = abs
	return f.offset, nil
}
|
||||
|
||||
/*
|
||||
func main() {
|
||||
r, err := zip.OpenReader("path/to/your.zip")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
fs := NewZipFS(&r.Reader)
|
||||
file, err := fs.Open(path.Join("path", "to", "file"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Now you can use 'file' as a fs.File
|
||||
}
|
||||
*/
|
||||
@@ -126,6 +126,13 @@ func (n SqlNull[T]) Value() (driver.Value, error) {
|
||||
if !n.Valid {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Check if the type implements fmt.Stringer (e.g., uuid.UUID, custom types)
|
||||
// Convert to string for driver compatibility
|
||||
if stringer, ok := any(n.Val).(fmt.Stringer); ok {
|
||||
return stringer.String(), nil
|
||||
}
|
||||
|
||||
return any(n.Val), nil
|
||||
}
|
||||
|
||||
@@ -167,6 +174,10 @@ func (n SqlNull[T]) String() string {
|
||||
if !n.Valid {
|
||||
return ""
|
||||
}
|
||||
// Check if the type implements fmt.Stringer for better string representation
|
||||
if stringer, ok := any(n.Val).(fmt.Stringer); ok {
|
||||
return stringer.String()
|
||||
}
|
||||
return fmt.Sprintf("%v", n.Val)
|
||||
}
|
||||
|
||||
|
||||
@@ -486,7 +486,8 @@ func TestSqlUUID_Value(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("Value failed: %v", err)
|
||||
}
|
||||
if val != testUUID {
|
||||
// Value() should return a string for driver compatibility
|
||||
if val != testUUID.String() {
|
||||
t.Errorf("expected %s, got %s", testUUID.String(), val)
|
||||
}
|
||||
|
||||
|
||||
180
pkg/spectypes/uuid_integration_test.go
Normal file
180
pkg/spectypes/uuid_integration_test.go
Normal file
@@ -0,0 +1,180 @@
|
||||
package spectypes
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
// TestUUIDWithRealDatabase tests that SqlUUID works with actual database operations
|
||||
func TestUUIDWithRealDatabase(t *testing.T) {
|
||||
// Open an in-memory SQLite database
|
||||
db, err := sql.Open("sqlite3", ":memory:")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create a test table with UUID column
|
||||
_, err = db.Exec(`
|
||||
CREATE TABLE test_users (
|
||||
id INTEGER PRIMARY KEY,
|
||||
user_id TEXT,
|
||||
name TEXT
|
||||
)
|
||||
`)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create table: %v", err)
|
||||
}
|
||||
|
||||
// Test 1: Insert with UUID
|
||||
testUUID1 := uuid.New()
|
||||
sqlUUID1 := NewSqlUUID(testUUID1)
|
||||
|
||||
_, err = db.Exec("INSERT INTO test_users (id, user_id, name) VALUES (?, ?, ?)",
|
||||
1, sqlUUID1, "Alice")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to insert record: %v", err)
|
||||
}
|
||||
|
||||
// Test 2: Update with UUID
|
||||
testUUID2 := uuid.New()
|
||||
sqlUUID2 := NewSqlUUID(testUUID2)
|
||||
|
||||
_, err = db.Exec("UPDATE test_users SET user_id = ? WHERE id = ?",
|
||||
sqlUUID2, 1)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to update record: %v", err)
|
||||
}
|
||||
|
||||
// Test 3: Read back and verify
|
||||
var retrievedID string
|
||||
var name string
|
||||
err = db.QueryRow("SELECT user_id, name FROM test_users WHERE id = ?", 1).Scan(&retrievedID, &name)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query record: %v", err)
|
||||
}
|
||||
|
||||
if retrievedID != testUUID2.String() {
|
||||
t.Errorf("Expected UUID %s, got %s", testUUID2.String(), retrievedID)
|
||||
}
|
||||
|
||||
if name != "Alice" {
|
||||
t.Errorf("Expected name 'Alice', got '%s'", name)
|
||||
}
|
||||
|
||||
// Test 4: Insert with NULL UUID
|
||||
nullUUID := SqlUUID{Valid: false}
|
||||
_, err = db.Exec("INSERT INTO test_users (id, user_id, name) VALUES (?, ?, ?)",
|
||||
2, nullUUID, "Bob")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to insert record with NULL UUID: %v", err)
|
||||
}
|
||||
|
||||
// Test 5: Read NULL UUID back
|
||||
var retrievedNullID sql.NullString
|
||||
err = db.QueryRow("SELECT user_id FROM test_users WHERE id = ?", 2).Scan(&retrievedNullID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query NULL UUID record: %v", err)
|
||||
}
|
||||
|
||||
if retrievedNullID.Valid {
|
||||
t.Errorf("Expected NULL UUID, got %s", retrievedNullID.String)
|
||||
}
|
||||
|
||||
t.Logf("All database operations with UUID succeeded!")
|
||||
}
|
||||
|
||||
// TestUUIDValueReturnsString verifies that Value() returns string, not uuid.UUID
|
||||
func TestUUIDValueReturnsString(t *testing.T) {
|
||||
testUUID := uuid.New()
|
||||
sqlUUID := NewSqlUUID(testUUID)
|
||||
|
||||
val, err := sqlUUID.Value()
|
||||
if err != nil {
|
||||
t.Fatalf("Value() failed: %v", err)
|
||||
}
|
||||
|
||||
// The value should be a string, not a uuid.UUID
|
||||
strVal, ok := val.(string)
|
||||
if !ok {
|
||||
t.Fatalf("Expected Value() to return string, got %T", val)
|
||||
}
|
||||
|
||||
if strVal != testUUID.String() {
|
||||
t.Errorf("Expected %s, got %s", testUUID.String(), strVal)
|
||||
}
|
||||
|
||||
t.Logf("✓ Value() correctly returns string: %s", strVal)
|
||||
}
|
||||
|
||||
// CustomStringableType is a custom type that implements fmt.Stringer
|
||||
type CustomStringableType string
|
||||
|
||||
func (c CustomStringableType) String() string {
|
||||
return "custom:" + string(c)
|
||||
}
|
||||
|
||||
// TestCustomStringableType verifies that any type implementing fmt.Stringer works
|
||||
func TestCustomStringableType(t *testing.T) {
|
||||
customVal := CustomStringableType("test-value")
|
||||
sqlCustom := SqlNull[CustomStringableType]{
|
||||
Val: customVal,
|
||||
Valid: true,
|
||||
}
|
||||
|
||||
val, err := sqlCustom.Value()
|
||||
if err != nil {
|
||||
t.Fatalf("Value() failed: %v", err)
|
||||
}
|
||||
|
||||
// Should return the result of String() method
|
||||
strVal, ok := val.(string)
|
||||
if !ok {
|
||||
t.Fatalf("Expected Value() to return string, got %T", val)
|
||||
}
|
||||
|
||||
expected := "custom:test-value"
|
||||
if strVal != expected {
|
||||
t.Errorf("Expected %s, got %s", expected, strVal)
|
||||
}
|
||||
|
||||
t.Logf("✓ Custom Stringer type correctly converted to string: %s", strVal)
|
||||
}
|
||||
|
||||
// TestStringMethodUsesStringer verifies that String() method also uses fmt.Stringer
|
||||
func TestStringMethodUsesStringer(t *testing.T) {
|
||||
// Test with UUID
|
||||
testUUID := uuid.New()
|
||||
sqlUUID := NewSqlUUID(testUUID)
|
||||
|
||||
strResult := sqlUUID.String()
|
||||
if strResult != testUUID.String() {
|
||||
t.Errorf("Expected UUID String() to return %s, got %s", testUUID.String(), strResult)
|
||||
}
|
||||
t.Logf("✓ UUID String() method: %s", strResult)
|
||||
|
||||
// Test with custom Stringer type
|
||||
customVal := CustomStringableType("test-value")
|
||||
sqlCustom := SqlNull[CustomStringableType]{
|
||||
Val: customVal,
|
||||
Valid: true,
|
||||
}
|
||||
|
||||
customStr := sqlCustom.String()
|
||||
expected := "custom:test-value"
|
||||
if customStr != expected {
|
||||
t.Errorf("Expected custom String() to return %s, got %s", expected, customStr)
|
||||
}
|
||||
t.Logf("✓ Custom Stringer String() method: %s", customStr)
|
||||
|
||||
// Test with regular type (should use fmt.Sprintf)
|
||||
sqlInt := NewSqlInt64(42)
|
||||
intStr := sqlInt.String()
|
||||
if intStr != "42" {
|
||||
t.Errorf("Expected int String() to return '42', got '%s'", intStr)
|
||||
}
|
||||
t.Logf("✓ Regular type String() method: %s", intStr)
|
||||
}
|
||||
Reference in New Issue
Block a user