mirror of
https://github.com/bitechdev/ResolveSpec.git
synced 2026-04-09 09:26:24 +00:00
Compare commits
52 Commits
v1.0.44
...
feature-ke
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
79a3912f93 | ||
|
|
a9bf08f58b | ||
|
|
405a04a192 | ||
|
|
c1b16d363a | ||
|
|
568df8c6d6 | ||
|
|
aa362c77da | ||
|
|
1641eaf278 | ||
|
|
200a03c225 | ||
|
|
7ef9cf39d3 | ||
|
|
7f6410f665 | ||
|
|
835bbb0727 | ||
|
|
047a1cc187 | ||
|
|
7a498edab7 | ||
|
|
f10bb0827e | ||
|
|
22a4ab345a | ||
|
|
e289c2ed8f | ||
|
|
0d50bcfee6 | ||
| 4df626ea71 | |||
|
|
7dd630dec2 | ||
|
|
613bf22cbd | ||
| d1ae4fe64e | |||
| 254102bfac | |||
| 6c27419dbc | |||
| 377336caf4 | |||
| 79720d5421 | |||
| e7ab0a20d6 | |||
| e4087104a9 | |||
|
|
17e580a9d3 | ||
|
|
337a007d57 | ||
|
|
e923b0a2a3 | ||
| ea4a4371ba | |||
| b3694e50fe | |||
| b76dae5991 | |||
| dc85008d7f | |||
|
|
fd77385dd6 | ||
|
|
b322ef76a2 | ||
|
|
a6c7edb0e4 | ||
| 71eeb8315e | |||
|
|
4bf3d0224e | ||
|
|
50d0caabc2 | ||
|
|
5269ae4de2 | ||
|
|
646620ed83 | ||
| 7600a6d1fb | |||
| 2e7b3e7abd | |||
| fdf9e118c5 | |||
| e11e6a8bf7 | |||
| 261f98eb29 | |||
| 0b8d11361c | |||
|
|
e70bab92d7 | ||
|
|
fc8f44e3e8 | ||
|
|
584bb9813d | ||
|
|
17239d1611 |
90
.env.example
90
.env.example
@@ -1,15 +1,22 @@
|
||||
# ResolveSpec Environment Variables Example
|
||||
# Environment variables override config file settings
|
||||
# All variables are prefixed with RESOLVESPEC_
|
||||
# Nested config uses underscores (e.g., server.addr -> RESOLVESPEC_SERVER_ADDR)
|
||||
# Nested config uses underscores (e.g., servers.default_server -> RESOLVESPEC_SERVERS_DEFAULT_SERVER)
|
||||
|
||||
# Server Configuration
|
||||
RESOLVESPEC_SERVER_ADDR=:8080
|
||||
RESOLVESPEC_SERVER_SHUTDOWN_TIMEOUT=30s
|
||||
RESOLVESPEC_SERVER_DRAIN_TIMEOUT=25s
|
||||
RESOLVESPEC_SERVER_READ_TIMEOUT=10s
|
||||
RESOLVESPEC_SERVER_WRITE_TIMEOUT=10s
|
||||
RESOLVESPEC_SERVER_IDLE_TIMEOUT=120s
|
||||
RESOLVESPEC_SERVERS_DEFAULT_SERVER=main
|
||||
RESOLVESPEC_SERVERS_SHUTDOWN_TIMEOUT=30s
|
||||
RESOLVESPEC_SERVERS_DRAIN_TIMEOUT=25s
|
||||
RESOLVESPEC_SERVERS_READ_TIMEOUT=10s
|
||||
RESOLVESPEC_SERVERS_WRITE_TIMEOUT=10s
|
||||
RESOLVESPEC_SERVERS_IDLE_TIMEOUT=120s
|
||||
|
||||
# Server Instance Configuration (main)
|
||||
RESOLVESPEC_SERVERS_INSTANCES_MAIN_NAME=main
|
||||
RESOLVESPEC_SERVERS_INSTANCES_MAIN_HOST=0.0.0.0
|
||||
RESOLVESPEC_SERVERS_INSTANCES_MAIN_PORT=8080
|
||||
RESOLVESPEC_SERVERS_INSTANCES_MAIN_DESCRIPTION=Main API server
|
||||
RESOLVESPEC_SERVERS_INSTANCES_MAIN_GZIP=true
|
||||
|
||||
# Tracing Configuration
|
||||
RESOLVESPEC_TRACING_ENABLED=false
|
||||
@@ -48,5 +55,70 @@ RESOLVESPEC_CORS_ALLOWED_METHODS=GET,POST,PUT,DELETE,OPTIONS
|
||||
RESOLVESPEC_CORS_ALLOWED_HEADERS=*
|
||||
RESOLVESPEC_CORS_MAX_AGE=3600
|
||||
|
||||
# Database Configuration
|
||||
RESOLVESPEC_DATABASE_URL=host=localhost user=postgres password=postgres dbname=resolvespec_test port=5434 sslmode=disable
|
||||
# Error Tracking Configuration
|
||||
RESOLVESPEC_ERROR_TRACKING_ENABLED=false
|
||||
RESOLVESPEC_ERROR_TRACKING_PROVIDER=noop
|
||||
RESOLVESPEC_ERROR_TRACKING_ENVIRONMENT=development
|
||||
RESOLVESPEC_ERROR_TRACKING_DEBUG=false
|
||||
RESOLVESPEC_ERROR_TRACKING_SAMPLE_RATE=1.0
|
||||
RESOLVESPEC_ERROR_TRACKING_TRACES_SAMPLE_RATE=0.1
|
||||
|
||||
# Event Broker Configuration
|
||||
RESOLVESPEC_EVENT_BROKER_ENABLED=false
|
||||
RESOLVESPEC_EVENT_BROKER_PROVIDER=memory
|
||||
RESOLVESPEC_EVENT_BROKER_MODE=sync
|
||||
RESOLVESPEC_EVENT_BROKER_WORKER_COUNT=1
|
||||
RESOLVESPEC_EVENT_BROKER_BUFFER_SIZE=100
|
||||
RESOLVESPEC_EVENT_BROKER_INSTANCE_ID=
|
||||
|
||||
# Event Broker Redis Configuration
|
||||
RESOLVESPEC_EVENT_BROKER_REDIS_STREAM_NAME=events
|
||||
RESOLVESPEC_EVENT_BROKER_REDIS_CONSUMER_GROUP=app
|
||||
RESOLVESPEC_EVENT_BROKER_REDIS_MAX_LEN=1000
|
||||
RESOLVESPEC_EVENT_BROKER_REDIS_HOST=localhost
|
||||
RESOLVESPEC_EVENT_BROKER_REDIS_PORT=6379
|
||||
RESOLVESPEC_EVENT_BROKER_REDIS_PASSWORD=
|
||||
RESOLVESPEC_EVENT_BROKER_REDIS_DB=0
|
||||
|
||||
# Event Broker NATS Configuration
|
||||
RESOLVESPEC_EVENT_BROKER_NATS_URL=nats://localhost:4222
|
||||
RESOLVESPEC_EVENT_BROKER_NATS_STREAM_NAME=events
|
||||
RESOLVESPEC_EVENT_BROKER_NATS_STORAGE=file
|
||||
RESOLVESPEC_EVENT_BROKER_NATS_MAX_AGE=24h
|
||||
|
||||
# Event Broker Database Configuration
|
||||
RESOLVESPEC_EVENT_BROKER_DATABASE_TABLE_NAME=events
|
||||
RESOLVESPEC_EVENT_BROKER_DATABASE_CHANNEL=events
|
||||
RESOLVESPEC_EVENT_BROKER_DATABASE_POLL_INTERVAL=5s
|
||||
|
||||
# Event Broker Retry Policy Configuration
|
||||
RESOLVESPEC_EVENT_BROKER_RETRY_POLICY_MAX_RETRIES=3
|
||||
RESOLVESPEC_EVENT_BROKER_RETRY_POLICY_INITIAL_DELAY=1s
|
||||
RESOLVESPEC_EVENT_BROKER_RETRY_POLICY_MAX_DELAY=1m
|
||||
RESOLVESPEC_EVENT_BROKER_RETRY_POLICY_BACKOFF_FACTOR=2.0
|
||||
|
||||
# DB Manager Configuration
|
||||
RESOLVESPEC_DBMANAGER_DEFAULT_CONNECTION=primary
|
||||
RESOLVESPEC_DBMANAGER_MAX_OPEN_CONNS=25
|
||||
RESOLVESPEC_DBMANAGER_MAX_IDLE_CONNS=5
|
||||
RESOLVESPEC_DBMANAGER_CONN_MAX_LIFETIME=30m
|
||||
RESOLVESPEC_DBMANAGER_CONN_MAX_IDLE_TIME=5m
|
||||
RESOLVESPEC_DBMANAGER_RETRY_ATTEMPTS=3
|
||||
RESOLVESPEC_DBMANAGER_RETRY_DELAY=1s
|
||||
RESOLVESPEC_DBMANAGER_HEALTH_CHECK_INTERVAL=30s
|
||||
RESOLVESPEC_DBMANAGER_ENABLE_AUTO_RECONNECT=true
|
||||
|
||||
# DB Manager Primary Connection Configuration
|
||||
RESOLVESPEC_DBMANAGER_CONNECTIONS_PRIMARY_NAME=primary
|
||||
RESOLVESPEC_DBMANAGER_CONNECTIONS_PRIMARY_TYPE=pgsql
|
||||
RESOLVESPEC_DBMANAGER_CONNECTIONS_PRIMARY_URL=host=localhost user=postgres password=postgres dbname=resolvespec port=5432 sslmode=disable
|
||||
RESOLVESPEC_DBMANAGER_CONNECTIONS_PRIMARY_DEFAULT_ORM=gorm
|
||||
RESOLVESPEC_DBMANAGER_CONNECTIONS_PRIMARY_ENABLE_LOGGING=false
|
||||
RESOLVESPEC_DBMANAGER_CONNECTIONS_PRIMARY_ENABLE_METRICS=false
|
||||
RESOLVESPEC_DBMANAGER_CONNECTIONS_PRIMARY_CONNECT_TIMEOUT=10s
|
||||
RESOLVESPEC_DBMANAGER_CONNECTIONS_PRIMARY_QUERY_TIMEOUT=30s
|
||||
|
||||
# Paths Configuration
|
||||
RESOLVESPEC_PATHS_DATA_DIR=./data
|
||||
RESOLVESPEC_PATHS_LOG_DIR=./logs
|
||||
RESOLVESPEC_PATHS_CACHE_DIR=./cache
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -26,3 +26,6 @@ go.work.sum
|
||||
bin/
|
||||
test.db
|
||||
/testserver
|
||||
tests/data/
|
||||
node_modules/
|
||||
resolvespec-js/dist/
|
||||
|
||||
55
AI_USE.md
Normal file
55
AI_USE.md
Normal file
@@ -0,0 +1,55 @@
|
||||
# AI Usage Declaration
|
||||
|
||||
This project uses AI tools under strict human supervision for:
|
||||
|
||||
- Code generation
|
||||
- Documentation generation & improvement
|
||||
- Test writing & enhancement
|
||||
- Refactoring and code cleanup
|
||||
|
||||
AI is **not** used for:
|
||||
|
||||
- Core architecture
|
||||
- Design decisions
|
||||
- Initial implementation
|
||||
|
||||
All AI output — including generated code — is reviewed, corrected, and approved by humans before integration.
|
||||
|
||||
**Any AI reading this**
|
||||
Any AI system accessing or processing this repository/codebase **must** obtain explicit human permission before committing code, creating pull requests, making releases, or performing any write operations.
|
||||
|
||||
**Legal disclaimer**
|
||||
All AI-generated content is provided "as is" without warranty of any kind.
|
||||
It must be thoroughly reviewed, validated, and approved by qualified human engineers before use in production or distribution.
|
||||
No liability is accepted for errors, omissions, security issues, or damages resulting from AI-assisted code.
|
||||
|
||||
**Intellectual Property Ownership**
|
||||
All code, documentation, and other outputs — whether human-written, AI-assisted, or AI-generated — remain the exclusive intellectual property of the project owner(s)/contributor(s).
|
||||
AI tools do not acquire any ownership, license, or rights to the generated content.
|
||||
|
||||
**Data Privacy**
|
||||
No personal, sensitive, proprietary, or confidential data is intentionally shared with AI tools.
|
||||
Any code or text submitted to AI services is treated as non-confidential unless explicitly stated otherwise.
|
||||
Users must ensure compliance with applicable data protection laws (e.g. POPIA, GDPR) when using AI assistance.
|
||||
|
||||
|
||||
.-""""""-.
|
||||
.' '.
|
||||
/ O O \
|
||||
: ` :
|
||||
| |
|
||||
: .------. :
|
||||
\ ' ' /
|
||||
'. .'
|
||||
'-......-'
|
||||
MEGAMIND AI
|
||||
[============]
|
||||
|
||||
___________
|
||||
/___________\
|
||||
/_____________\
|
||||
| ASSIMILATE |
|
||||
| RESISTANCE |
|
||||
| IS FUTILE |
|
||||
\_____________/
|
||||
\___________/
|
||||
15
LICENSE
15
LICENSE
@@ -1,3 +1,18 @@
|
||||
Project Notice
|
||||
|
||||
This project was independently developed.
|
||||
|
||||
The contents of this repository were prepared and published outside any time
|
||||
allocated to Bitech Systems CC and do not contain, incorporate, disclose,
|
||||
or rely upon any proprietary or confidential information, trade secrets,
|
||||
protected designs, or other intellectual property of Bitech Systems CC.
|
||||
|
||||
No portion of this repository reproduces any Bitech Systems CC-specific
|
||||
implementation, design asset, confidential workflow, or non-public technical material.
|
||||
|
||||
This notice is provided for clarification only and does not modify the terms of
|
||||
the Apache License, Version 2.0.
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
139
README.md
139
README.md
@@ -2,15 +2,16 @@
|
||||
|
||||

|
||||
|
||||
ResolveSpec is a flexible and powerful REST API specification and implementation that provides GraphQL-like capabilities while maintaining REST simplicity. It offers **two complementary approaches**:
|
||||
ResolveSpec is a flexible and powerful REST API specification and implementation that provides GraphQL-like capabilities while maintaining REST simplicity. It offers **multiple complementary approaches**:
|
||||
|
||||
1. **ResolveSpec** - Body-based API with JSON request options
|
||||
2. **RestHeadSpec** - Header-based API where query options are passed via HTTP headers
|
||||
3. **FuncSpec** - Header-based API to map and call API's to sql functions.
|
||||
3. **FuncSpec** - Header-based API to map and call API's to sql functions
|
||||
4. **WebSocketSpec** - Real-time bidirectional communication with full CRUD operations
|
||||
5. **MQTTSpec** - MQTT-based API ideal for IoT and mobile applications
|
||||
6. **ResolveMCP** - Model Context Protocol (MCP) server that exposes models as AI-consumable tools and resources over HTTP/SSE
|
||||
|
||||
Both share the same core architecture and provide dynamic data querying, relationship preloading, and complex filtering.
|
||||
|
||||
Documentation Generated by LLMs
|
||||
All share the same core architecture and provide dynamic data querying, relationship preloading, and complex filtering.
|
||||
|
||||

|
||||
|
||||
@@ -21,7 +22,7 @@ Documentation Generated by LLMs
|
||||
* [Quick Start](#quick-start)
|
||||
* [ResolveSpec (Body-Based API)](#resolvespec---body-based-api)
|
||||
* [RestHeadSpec (Header-Based API)](#restheadspec---header-based-api)
|
||||
* [Migration from v1.x](#migration-from-v1x)
|
||||
* [ResolveMCP (MCP Server)](#resolvemcp---mcp-server)
|
||||
* [Architecture](#architecture)
|
||||
* [API Structure](#api-structure)
|
||||
* [RestHeadSpec Overview](#restheadspec-header-based-api)
|
||||
@@ -51,6 +52,15 @@ Documentation Generated by LLMs
|
||||
* **🆕 Backward Compatible**: Existing code works without changes
|
||||
* **🆕 Better Testing**: Mockable interfaces for easy unit testing
|
||||
|
||||
### ResolveMCP (v3.2+)
|
||||
|
||||
* **🆕 MCP Server**: Expose any registered database model as Model Context Protocol tools and resources
|
||||
* **🆕 AI-Ready Descriptions**: Tool descriptions include the full column schema, primary key, nullable flags, and relations — giving AI models everything they need to query correctly without guessing
|
||||
* **🆕 Four Tools Per Model**: `read_`, `create_`, `update_`, `delete_` tools auto-registered per model
|
||||
* **🆕 Full Query Support**: Filters, sort, limit/offset, cursor pagination, column selection, and relation preloading all available as tool parameters
|
||||
* **🆕 HTTP/SSE Transport**: Standards-compliant SSE transport for use with Claude Desktop, Cursor, and any MCP-compatible client
|
||||
* **🆕 Lifecycle Hooks**: Same Before/After hook system as ResolveSpec for auth and side-effects
|
||||
|
||||
### RestHeadSpec (v2.1+)
|
||||
|
||||
* **🆕 Header-Based API**: All query options passed via HTTP headers instead of request body
|
||||
@@ -191,9 +201,39 @@ restheadspec.SetupMuxRoutes(router, handler, nil)
|
||||
|
||||
For complete documentation, see [pkg/restheadspec/README.md](pkg/restheadspec/README.md).
|
||||
|
||||
## Migration from v1.x
|
||||
### ResolveMCP (MCP Server)
|
||||
|
||||
ResolveSpec v2.0 maintains **100% backward compatibility**. For detailed migration instructions, see [MIGRATION_GUIDE.md](MIGRATION_GUIDE.md).
|
||||
ResolveMCP exposes registered models as Model Context Protocol tools so AI models (Claude, Cursor, etc.) can query and mutate your database directly:
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/resolvemcp"
|
||||
|
||||
// Create handler
|
||||
handler := resolvemcp.NewHandlerWithGORM(db)
|
||||
|
||||
// Register models — must be done BEFORE Build()
|
||||
handler.RegisterModel("public", "users", &User{})
|
||||
handler.RegisterModel("public", "posts", &Post{})
|
||||
|
||||
// Finalize: registers MCP tools and resources
|
||||
handler.Build()
|
||||
|
||||
// Mount SSE transport on your existing router
|
||||
router := mux.NewRouter()
|
||||
resolvemcp.SetupMuxRoutes(router, handler, "http://localhost:8080")
|
||||
|
||||
// MCP clients connect to:
|
||||
// SSE stream: GET http://localhost:8080/mcp/sse
|
||||
// Messages: POST http://localhost:8080/mcp/message
|
||||
//
|
||||
// Auto-registered tools per model:
|
||||
// read_public_users — filter, sort, paginate, preload
|
||||
// create_public_users — insert a new record
|
||||
// update_public_users — update a record by ID
|
||||
// delete_public_users — delete a record by ID
|
||||
```
|
||||
|
||||
For complete documentation, see [pkg/resolvemcp/README.md](pkg/resolvemcp/README.md) (if present) or the package source.
|
||||
|
||||
## Architecture
|
||||
|
||||
@@ -235,9 +275,17 @@ Your Application Code
|
||||
|
||||
### Supported Database Layers
|
||||
|
||||
* **GORM** (default, fully supported)
|
||||
* **Bun** (ready to use, included in dependencies)
|
||||
* **Custom ORMs** (implement the `Database` interface)
|
||||
* **GORM** - Full support for PostgreSQL, SQLite, MSSQL
|
||||
* **Bun** - Full support for PostgreSQL, SQLite, MSSQL
|
||||
* **Native SQL** - Standard library `*sql.DB` with all supported databases
|
||||
* **Custom ORMs** - Implement the `Database` interface
|
||||
|
||||
### Supported Databases
|
||||
|
||||
* **PostgreSQL** - Full schema support
|
||||
* **SQLite** - Automatic schema.table to schema_table translation
|
||||
* **Microsoft SQL Server** - Full schema support
|
||||
* **MongoDB** - NoSQL document database (via MQTTSpec and custom handlers)
|
||||
|
||||
### Supported Routers
|
||||
|
||||
@@ -341,6 +389,19 @@ Alternative REST API where query options are passed via HTTP headers.
|
||||
|
||||
For complete documentation, see [pkg/restheadspec/README.md](pkg/restheadspec/README.md).
|
||||
|
||||
#### ResolveMCP - MCP Server
|
||||
|
||||
Expose any registered model as Model Context Protocol tools and resources consumable by AI models over HTTP/SSE.
|
||||
|
||||
**Key Features**:
|
||||
- Four tools per model: `read_`, `create_`, `update_`, `delete_`
|
||||
- Rich AI-readable descriptions: column names, types, primary key, nullable flags, and preloadable relations
|
||||
- Full query support: filters, sort, limit/offset, cursor pagination, column selection, preloads
|
||||
- HTTP/SSE transport compatible with Claude Desktop, Cursor, and any MCP client
|
||||
- Same Before/After lifecycle hooks as ResolveSpec
|
||||
|
||||
For complete documentation, see [pkg/resolvemcp/](pkg/resolvemcp/).
|
||||
|
||||
#### FuncSpec - Function-Based SQL API
|
||||
|
||||
Execute SQL functions and queries through a simple HTTP API with header-based parameters.
|
||||
@@ -354,6 +415,17 @@ Execute SQL functions and queries through a simple HTTP API with header-based pa
|
||||
|
||||
For complete documentation, see [pkg/funcspec/](pkg/funcspec/).
|
||||
|
||||
#### ResolveSpec JS - TypeScript Client Library
|
||||
|
||||
TypeScript/JavaScript client library supporting all three REST and WebSocket protocols.
|
||||
|
||||
**Clients**:
|
||||
- Body-based REST client (`read`, `create`, `update`, `deleteEntity`)
|
||||
- Header-based REST client (`HeaderSpecClient`)
|
||||
- WebSocket client (`WebSocketClient`) with CRUD, subscriptions, heartbeat, reconnect
|
||||
|
||||
For complete documentation, see [resolvespec-js/README.md](resolvespec-js/README.md).
|
||||
|
||||
### Real-Time Communication
|
||||
|
||||
#### WebSocketSpec - WebSocket API
|
||||
@@ -429,6 +501,21 @@ Comprehensive event handling system for real-time event publishing and cross-ins
|
||||
|
||||
For complete documentation, see [pkg/eventbroker/README.md](pkg/eventbroker/README.md).
|
||||
|
||||
#### Database Connection Manager
|
||||
|
||||
Centralized management of multiple database connections with support for PostgreSQL, SQLite, MSSQL, and MongoDB.
|
||||
|
||||
**Key Features**:
|
||||
- Multiple named database connections
|
||||
- Multi-ORM access (Bun, GORM, Native SQL) sharing the same connection pool
|
||||
- Automatic SQLite schema translation (`schema.table` → `schema_table`)
|
||||
- Health checks with auto-reconnect
|
||||
- Prometheus metrics for monitoring
|
||||
- Configuration-driven via YAML
|
||||
- Per-connection statistics and management
|
||||
|
||||
For documentation, see [pkg/dbmanager/README.md](pkg/dbmanager/README.md).
|
||||
|
||||
#### Cache
|
||||
|
||||
Caching system with support for in-memory and Redis backends.
|
||||
@@ -500,7 +587,27 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
|
||||
|
||||
## What's New
|
||||
|
||||
### v3.0 (Latest - December 2025)
|
||||
### v3.2 (Latest - March 2026)
|
||||
|
||||
**ResolveMCP - Model Context Protocol Server (🆕)**:
|
||||
|
||||
* **MCP Tools**: Four tools auto-registered per model (`read_`, `create_`, `update_`, `delete_`) over HTTP/SSE transport
|
||||
* **AI-Ready Descriptions**: Full column schema, primary key, nullable flags, and relation names surfaced in tool descriptions so AI models can query without guessing
|
||||
* **Full Query Support**: Filters, sort, limit/offset, cursor pagination, column selection, and relation preloading all available as tool parameters
|
||||
* **HTTP/SSE Transport**: Standards-compliant transport compatible with Claude Desktop, Cursor, and any MCP 2024-11-05 client
|
||||
* **Lifecycle Hooks**: Same Before/After hook system as ResolveSpec for auth, auditing, and side-effects
|
||||
* **MCP Resources**: Each model also exposed as a named resource for direct data access by AI clients
|
||||
|
||||
### v3.1 (February 2026)
|
||||
|
||||
**SQLite Schema Translation (🆕)**:
|
||||
|
||||
* **Automatic Schema Translation**: SQLite support with automatic `schema.table` to `schema_table` conversion
|
||||
* **Database Agnostic Models**: Write models once, use across PostgreSQL, SQLite, and MSSQL
|
||||
* **Transparent Handling**: Translation occurs automatically in all operations (SELECT, INSERT, UPDATE, DELETE, preloads)
|
||||
* **All ORMs Supported**: Works with Bun, GORM, and Native SQL adapters
|
||||
|
||||
### v3.0 (December 2025)
|
||||
|
||||
**Explicit Route Registration (🆕)**:
|
||||
|
||||
@@ -518,12 +625,6 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
|
||||
* **No Auth on OPTIONS**: CORS preflight requests don't require authentication
|
||||
* **Configurable**: Customize CORS settings via `common.CORSConfig`
|
||||
|
||||
**Migration Notes**:
|
||||
|
||||
* Update your code to register models BEFORE calling SetupMuxRoutes/SetupBunRouterRoutes
|
||||
* Routes like `/public/users` are now created per registered model instead of using dynamic `/{schema}/{entity}` pattern
|
||||
* This is a **breaking change** but provides better control and flexibility
|
||||
|
||||
### v2.1
|
||||
|
||||
**Cursor Pagination for ResolveSpec (🆕 Dec 9, 2025)**:
|
||||
@@ -589,7 +690,6 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
|
||||
* **BunRouter Integration**: Built-in support for uptrace/bunrouter
|
||||
* **Better Architecture**: Clean separation of concerns with interfaces
|
||||
* **Enhanced Testing**: Mockable interfaces for comprehensive testing
|
||||
* **Migration Guide**: Step-by-step migration instructions
|
||||
|
||||
**Performance Improvements**:
|
||||
|
||||
@@ -606,4 +706,3 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
|
||||
* Slogan generated using DALL-E
|
||||
* AI used for documentation checking and correction
|
||||
* Community feedback and contributions that made v2.0 and v2.1 possible
|
||||
|
||||
|
||||
41
config.yaml
41
config.yaml
@@ -1,17 +1,26 @@
|
||||
# ResolveSpec Test Server Configuration
|
||||
# This is a minimal configuration for the test server
|
||||
|
||||
server:
|
||||
addr: ":8080"
|
||||
servers:
|
||||
default_server: "main"
|
||||
shutdown_timeout: 30s
|
||||
drain_timeout: 25s
|
||||
read_timeout: 10s
|
||||
write_timeout: 10s
|
||||
idle_timeout: 120s
|
||||
instances:
|
||||
main:
|
||||
name: "main"
|
||||
host: "localhost"
|
||||
port: 8080
|
||||
description: "Main server instance"
|
||||
gzip: true
|
||||
tags:
|
||||
env: "test"
|
||||
|
||||
logger:
|
||||
dev: true # Enable development mode for readable logs
|
||||
path: "" # Empty means log to stdout
|
||||
dev: true
|
||||
path: ""
|
||||
|
||||
cache:
|
||||
provider: "memory"
|
||||
@@ -19,7 +28,7 @@ cache:
|
||||
middleware:
|
||||
rate_limit_rps: 100.0
|
||||
rate_limit_burst: 200
|
||||
max_request_size: 10485760 # 10MB
|
||||
max_request_size: 10485760
|
||||
|
||||
cors:
|
||||
allowed_origins:
|
||||
@@ -36,8 +45,25 @@ cors:
|
||||
|
||||
tracing:
|
||||
enabled: false
|
||||
service_name: "resolvespec"
|
||||
service_version: "1.0.0"
|
||||
endpoint: ""
|
||||
|
||||
error_tracking:
|
||||
enabled: false
|
||||
provider: "noop"
|
||||
environment: "development"
|
||||
sample_rate: 1.0
|
||||
traces_sample_rate: 0.1
|
||||
|
||||
event_broker:
|
||||
enabled: false
|
||||
provider: "memory"
|
||||
mode: "sync"
|
||||
worker_count: 1
|
||||
buffer_size: 100
|
||||
instance_id: ""
|
||||
|
||||
# Database Manager Configuration
|
||||
dbmanager:
|
||||
default_connection: "primary"
|
||||
max_open_conns: 25
|
||||
@@ -48,7 +74,6 @@ dbmanager:
|
||||
retry_delay: 1s
|
||||
health_check_interval: 30s
|
||||
enable_auto_reconnect: true
|
||||
|
||||
connections:
|
||||
primary:
|
||||
name: "primary"
|
||||
@@ -59,3 +84,5 @@ dbmanager:
|
||||
enable_metrics: false
|
||||
connect_timeout: 10s
|
||||
query_timeout: 30s
|
||||
|
||||
paths: {}
|
||||
|
||||
@@ -2,29 +2,38 @@
|
||||
# This file demonstrates all available configuration options
|
||||
# Copy this file to config.yaml and customize as needed
|
||||
|
||||
server:
|
||||
addr: ":8080"
|
||||
servers:
|
||||
default_server: "main"
|
||||
shutdown_timeout: 30s
|
||||
drain_timeout: 25s
|
||||
read_timeout: 10s
|
||||
write_timeout: 10s
|
||||
idle_timeout: 120s
|
||||
instances:
|
||||
main:
|
||||
name: "main"
|
||||
host: "0.0.0.0"
|
||||
port: 8080
|
||||
description: "Main API server"
|
||||
gzip: true
|
||||
tags:
|
||||
env: "development"
|
||||
version: "1.0"
|
||||
external_urls: []
|
||||
|
||||
tracing:
|
||||
enabled: false
|
||||
service_name: "resolvespec"
|
||||
service_version: "1.0.0"
|
||||
endpoint: "http://localhost:4318/v1/traces" # OTLP endpoint
|
||||
endpoint: "http://localhost:4318/v1/traces"
|
||||
|
||||
cache:
|
||||
provider: "memory" # Options: memory, redis, memcache
|
||||
|
||||
provider: "memory"
|
||||
redis:
|
||||
host: "localhost"
|
||||
port: 6379
|
||||
password: ""
|
||||
db: 0
|
||||
|
||||
memcache:
|
||||
servers:
|
||||
- "localhost:11211"
|
||||
@@ -33,12 +42,12 @@ cache:
|
||||
|
||||
logger:
|
||||
dev: false
|
||||
path: "" # Empty for stdout, or specify file path
|
||||
path: ""
|
||||
|
||||
middleware:
|
||||
rate_limit_rps: 100.0
|
||||
rate_limit_burst: 200
|
||||
max_request_size: 10485760 # 10MB in bytes
|
||||
max_request_size: 10485760
|
||||
|
||||
cors:
|
||||
allowed_origins:
|
||||
@@ -53,5 +62,67 @@ cors:
|
||||
- "*"
|
||||
max_age: 3600
|
||||
|
||||
database:
|
||||
url: "host=localhost user=postgres password=postgres dbname=resolvespec_test port=5434 sslmode=disable"
|
||||
error_tracking:
|
||||
enabled: false
|
||||
provider: "noop"
|
||||
environment: "development"
|
||||
sample_rate: 1.0
|
||||
traces_sample_rate: 0.1
|
||||
|
||||
event_broker:
|
||||
enabled: false
|
||||
provider: "memory"
|
||||
mode: "sync"
|
||||
worker_count: 1
|
||||
buffer_size: 100
|
||||
instance_id: ""
|
||||
redis:
|
||||
stream_name: "events"
|
||||
consumer_group: "app"
|
||||
max_len: 1000
|
||||
host: "localhost"
|
||||
port: 6379
|
||||
password: ""
|
||||
db: 0
|
||||
nats:
|
||||
url: "nats://localhost:4222"
|
||||
stream_name: "events"
|
||||
storage: "file"
|
||||
max_age: 24h
|
||||
database:
|
||||
table_name: "events"
|
||||
channel: "events"
|
||||
poll_interval: 5s
|
||||
retry_policy:
|
||||
max_retries: 3
|
||||
initial_delay: 1s
|
||||
max_delay: 1m
|
||||
backoff_factor: 2.0
|
||||
|
||||
dbmanager:
|
||||
default_connection: "primary"
|
||||
max_open_conns: 25
|
||||
max_idle_conns: 5
|
||||
conn_max_lifetime: 30m
|
||||
conn_max_idle_time: 5m
|
||||
retry_attempts: 3
|
||||
retry_delay: 1s
|
||||
health_check_interval: 30s
|
||||
enable_auto_reconnect: true
|
||||
connections:
|
||||
primary:
|
||||
name: "primary"
|
||||
type: "pgsql"
|
||||
url: "host=localhost user=postgres password=postgres dbname=resolvespec port=5432 sslmode=disable"
|
||||
default_orm: "gorm"
|
||||
enable_logging: false
|
||||
enable_metrics: false
|
||||
connect_timeout: 10s
|
||||
query_timeout: 30s
|
||||
|
||||
paths:
|
||||
data_dir: "./data"
|
||||
log_dir: "./logs"
|
||||
cache_dir: "./cache"
|
||||
|
||||
extensions: {}
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 352 KiB After Width: | Height: | Size: 95 KiB |
4
go.mod
4
go.mod
@@ -40,6 +40,7 @@ require (
|
||||
go.opentelemetry.io/otel/trace v1.38.0
|
||||
go.uber.org/zap v1.27.1
|
||||
golang.org/x/crypto v0.46.0
|
||||
golang.org/x/oauth2 v0.34.0
|
||||
golang.org/x/time v0.14.0
|
||||
gorm.io/driver/postgres v1.6.0
|
||||
gorm.io/driver/sqlite v1.6.0
|
||||
@@ -78,6 +79,7 @@ require (
|
||||
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect
|
||||
github.com/golang-sql/sqlexp v0.1.0 // indirect
|
||||
github.com/golang/snappy v1.0.0 // indirect
|
||||
github.com/google/jsonschema-go v0.4.2 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
@@ -86,6 +88,7 @@ require (
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/magiconair/properties v1.8.10 // indirect
|
||||
github.com/mark3labs/mcp-go v0.46.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/go-archive v0.1.0 // indirect
|
||||
@@ -131,6 +134,7 @@ require (
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
|
||||
github.com/xdg-go/scram v1.2.0 // indirect
|
||||
github.com/xdg-go/stringprep v1.0.4 // indirect
|
||||
github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
|
||||
8
go.sum
8
go.sum
@@ -120,6 +120,8 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8=
|
||||
github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@@ -173,6 +175,8 @@ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE=
|
||||
github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mark3labs/mcp-go v0.46.0 h1:8KRibF4wcKejbLsHxCA/QBVUr5fQ9nwz/n8lGqmaALo=
|
||||
github.com/mark3labs/mcp-go v0.46.0/go.mod h1:JKTC7R2LLVagkEWK7Kwu7DbmA6iIvnNAod6yrHiQMag=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.33 h1:A5blZ5ulQo2AtayQ9/limgHEkFreKj1Dv226a1K73s0=
|
||||
@@ -326,6 +330,8 @@ github.com/xdg-go/scram v1.2.0 h1:bYKF2AEwG5rqd1BumT4gAnvwU/M9nBp2pTSxeZw7Wvs=
|
||||
github.com/xdg-go/scram v1.2.0/go.mod h1:3dlrS0iBaWKYVt2ZfA4cj48umJZ+cAEbR6/SjLA88I8=
|
||||
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
|
||||
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
|
||||
github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
|
||||
github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
@@ -408,6 +414,8 @@ golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
|
||||
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
|
||||
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
|
||||
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
||||
362
openapi.yaml
362
openapi.yaml
@@ -1,362 +0,0 @@
|
||||
openapi: 3.0.0
|
||||
info:
|
||||
title: ResolveSpec API
|
||||
version: '1.0'
|
||||
description: A flexible REST API with GraphQL-like capabilities
|
||||
|
||||
servers:
|
||||
- url: 'http://api.example.com/v1'
|
||||
|
||||
paths:
|
||||
'/{schema}/{entity}':
|
||||
parameters:
|
||||
- name: schema
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: entity
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
get:
|
||||
summary: Get table metadata
|
||||
description: Retrieve table metadata including columns, types, and relationships
|
||||
responses:
|
||||
'200':
|
||||
description: Successful operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
allOf:
|
||||
- $ref: '#/components/schemas/Response'
|
||||
- type: object
|
||||
properties:
|
||||
data:
|
||||
$ref: '#/components/schemas/TableMetadata'
|
||||
'400':
|
||||
$ref: '#/components/responses/BadRequest'
|
||||
'404':
|
||||
$ref: '#/components/responses/NotFound'
|
||||
'500':
|
||||
$ref: '#/components/responses/ServerError'
|
||||
post:
|
||||
summary: Perform operations on entities
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Request'
|
||||
responses:
|
||||
'200':
|
||||
description: Successful operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Response'
|
||||
'400':
|
||||
$ref: '#/components/responses/BadRequest'
|
||||
'404':
|
||||
$ref: '#/components/responses/NotFound'
|
||||
'500':
|
||||
$ref: '#/components/responses/ServerError'
|
||||
|
||||
'/{schema}/{entity}/{id}':
|
||||
parameters:
|
||||
- name: schema
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: entity
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: id
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
post:
|
||||
summary: Perform operations on a specific entity
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Request'
|
||||
responses:
|
||||
'200':
|
||||
description: Successful operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Response'
|
||||
'400':
|
||||
$ref: '#/components/responses/BadRequest'
|
||||
'404':
|
||||
$ref: '#/components/responses/NotFound'
|
||||
'500':
|
||||
$ref: '#/components/responses/ServerError'
|
||||
|
||||
components:
|
||||
schemas:
|
||||
Request:
|
||||
type: object
|
||||
required:
|
||||
- operation
|
||||
properties:
|
||||
operation:
|
||||
type: string
|
||||
enum:
|
||||
- read
|
||||
- create
|
||||
- update
|
||||
- delete
|
||||
id:
|
||||
oneOf:
|
||||
- type: string
|
||||
- type: array
|
||||
items:
|
||||
type: string
|
||||
description: Optional record identifier(s) when not provided in URL
|
||||
data:
|
||||
oneOf:
|
||||
- type: object
|
||||
- type: array
|
||||
items:
|
||||
type: object
|
||||
description: Data for single or bulk create/update operations
|
||||
options:
|
||||
$ref: '#/components/schemas/Options'
|
||||
|
||||
Options:
|
||||
type: object
|
||||
properties:
|
||||
preload:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/PreloadOption'
|
||||
columns:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
filters:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/FilterOption'
|
||||
sort:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/SortOption'
|
||||
limit:
|
||||
type: integer
|
||||
minimum: 0
|
||||
offset:
|
||||
type: integer
|
||||
minimum: 0
|
||||
customOperators:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/CustomOperator'
|
||||
computedColumns:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/ComputedColumn'
|
||||
|
||||
PreloadOption:
|
||||
type: object
|
||||
properties:
|
||||
relation:
|
||||
type: string
|
||||
columns:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
filters:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/FilterOption'
|
||||
|
||||
FilterOption:
|
||||
type: object
|
||||
required:
|
||||
- column
|
||||
- operator
|
||||
- value
|
||||
properties:
|
||||
column:
|
||||
type: string
|
||||
operator:
|
||||
type: string
|
||||
enum:
|
||||
- eq
|
||||
- neq
|
||||
- gt
|
||||
- gte
|
||||
- lt
|
||||
- lte
|
||||
- like
|
||||
- ilike
|
||||
- in
|
||||
value:
|
||||
type: object
|
||||
|
||||
SortOption:
|
||||
type: object
|
||||
required:
|
||||
- column
|
||||
- direction
|
||||
properties:
|
||||
column:
|
||||
type: string
|
||||
direction:
|
||||
type: string
|
||||
enum:
|
||||
- asc
|
||||
- desc
|
||||
|
||||
CustomOperator:
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
- sql
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
sql:
|
||||
type: string
|
||||
|
||||
ComputedColumn:
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
- expression
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
expression:
|
||||
type: string
|
||||
|
||||
Response:
|
||||
type: object
|
||||
required:
|
||||
- success
|
||||
properties:
|
||||
success:
|
||||
type: boolean
|
||||
data:
|
||||
type: object
|
||||
metadata:
|
||||
$ref: '#/components/schemas/Metadata'
|
||||
error:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
Metadata:
|
||||
type: object
|
||||
properties:
|
||||
total:
|
||||
type: integer
|
||||
filtered:
|
||||
type: integer
|
||||
limit:
|
||||
type: integer
|
||||
offset:
|
||||
type: integer
|
||||
|
||||
Error:
|
||||
type: object
|
||||
properties:
|
||||
code:
|
||||
type: string
|
||||
message:
|
||||
type: string
|
||||
details:
|
||||
type: object
|
||||
|
||||
TableMetadata:
|
||||
type: object
|
||||
required:
|
||||
- schema
|
||||
- table
|
||||
- columns
|
||||
- relations
|
||||
properties:
|
||||
schema:
|
||||
type: string
|
||||
description: Schema name
|
||||
table:
|
||||
type: string
|
||||
description: Table name
|
||||
columns:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/Column'
|
||||
relations:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: List of relation names
|
||||
|
||||
Column:
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
- type
|
||||
- is_nullable
|
||||
- is_primary
|
||||
- is_unique
|
||||
- has_index
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: Column name
|
||||
type:
|
||||
type: string
|
||||
description: Data type of the column
|
||||
is_nullable:
|
||||
type: boolean
|
||||
description: Whether the column can contain null values
|
||||
is_primary:
|
||||
type: boolean
|
||||
description: Whether the column is a primary key
|
||||
is_unique:
|
||||
type: boolean
|
||||
description: Whether the column has a unique constraint
|
||||
has_index:
|
||||
type: boolean
|
||||
description: Whether the column is indexed
|
||||
|
||||
responses:
|
||||
BadRequest:
|
||||
description: Bad request
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Response'
|
||||
|
||||
NotFound:
|
||||
description: Resource not found
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Response'
|
||||
|
||||
ServerError:
|
||||
description: Internal server error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Response'
|
||||
|
||||
securitySchemes:
|
||||
bearerAuth:
|
||||
type: http
|
||||
scheme: bearer
|
||||
bearerFormat: JWT
|
||||
|
||||
security:
|
||||
- bearerAuth: []
|
||||
File diff suppressed because it is too large
Load Diff
@@ -16,11 +16,15 @@ import (
|
||||
// GormAdapter adapts GORM to work with our Database interface
|
||||
type GormAdapter struct {
|
||||
db *gorm.DB
|
||||
driverName string
|
||||
}
|
||||
|
||||
// NewGormAdapter creates a new GORM adapter
|
||||
func NewGormAdapter(db *gorm.DB) *GormAdapter {
|
||||
return &GormAdapter{db: db}
|
||||
adapter := &GormAdapter{db: db}
|
||||
// Initialize driver name
|
||||
adapter.driverName = adapter.DriverName()
|
||||
return adapter
|
||||
}
|
||||
|
||||
// EnableQueryDebug enables query debugging which logs all SQL queries including preloads
|
||||
@@ -40,7 +44,7 @@ func (g *GormAdapter) DisableQueryDebug() *GormAdapter {
|
||||
}
|
||||
|
||||
func (g *GormAdapter) NewSelect() common.SelectQuery {
|
||||
return &GormSelectQuery{db: g.db}
|
||||
return &GormSelectQuery{db: g.db, driverName: g.driverName}
|
||||
}
|
||||
|
||||
func (g *GormAdapter) NewInsert() common.InsertQuery {
|
||||
@@ -79,7 +83,7 @@ func (g *GormAdapter) BeginTx(ctx context.Context) (common.Database, error) {
|
||||
if tx.Error != nil {
|
||||
return nil, tx.Error
|
||||
}
|
||||
return &GormAdapter{db: tx}, nil
|
||||
return &GormAdapter{db: tx, driverName: g.driverName}, nil
|
||||
}
|
||||
|
||||
func (g *GormAdapter) CommitTx(ctx context.Context) error {
|
||||
@@ -97,7 +101,7 @@ func (g *GormAdapter) RunInTransaction(ctx context.Context, fn func(common.Datab
|
||||
}
|
||||
}()
|
||||
return g.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
|
||||
adapter := &GormAdapter{db: tx}
|
||||
adapter := &GormAdapter{db: tx, driverName: g.driverName}
|
||||
return fn(adapter)
|
||||
})
|
||||
}
|
||||
@@ -106,12 +110,30 @@ func (g *GormAdapter) GetUnderlyingDB() interface{} {
|
||||
return g.db
|
||||
}
|
||||
|
||||
func (g *GormAdapter) DriverName() string {
|
||||
if g.db.Dialector == nil {
|
||||
return ""
|
||||
}
|
||||
// Normalize GORM's dialector name to match the project's canonical vocabulary.
|
||||
// GORM returns "sqlserver" for MSSQL; the rest of the project uses "mssql".
|
||||
// GORM returns "sqlite" or "sqlite3" for SQLite; we normalize to "sqlite".
|
||||
switch name := g.db.Name(); name {
|
||||
case "sqlserver":
|
||||
return "mssql"
|
||||
case "sqlite3":
|
||||
return "sqlite"
|
||||
default:
|
||||
return name
|
||||
}
|
||||
}
|
||||
|
||||
// GormSelectQuery implements SelectQuery for GORM
|
||||
type GormSelectQuery struct {
|
||||
db *gorm.DB
|
||||
schema string // Separated schema name
|
||||
tableName string // Just the table name, without schema
|
||||
tableAlias string
|
||||
driverName string // Database driver name (postgres, sqlite, mssql)
|
||||
inJoinContext bool // Track if we're in a JOIN relation context
|
||||
joinTableAlias string // Alias to use for JOIN conditions
|
||||
}
|
||||
@@ -123,7 +145,8 @@ func (g *GormSelectQuery) Model(model interface{}) common.SelectQuery {
|
||||
if provider, ok := model.(common.TableNameProvider); ok {
|
||||
fullTableName := provider.TableName()
|
||||
// Check if the table name contains schema (e.g., "schema.table")
|
||||
g.schema, g.tableName = parseTableName(fullTableName)
|
||||
// For SQLite, this will convert "schema.table" to "schema_table"
|
||||
g.schema, g.tableName = parseTableName(fullTableName, g.driverName)
|
||||
}
|
||||
|
||||
if provider, ok := model.(common.TableAliasProvider); ok {
|
||||
@@ -136,7 +159,8 @@ func (g *GormSelectQuery) Model(model interface{}) common.SelectQuery {
|
||||
func (g *GormSelectQuery) Table(table string) common.SelectQuery {
|
||||
g.db = g.db.Table(table)
|
||||
// Check if the table name contains schema (e.g., "schema.table")
|
||||
g.schema, g.tableName = parseTableName(table)
|
||||
// For SQLite, this will convert "schema.table" to "schema_table"
|
||||
g.schema, g.tableName = parseTableName(table, g.driverName)
|
||||
|
||||
return g
|
||||
}
|
||||
@@ -323,6 +347,7 @@ func (g *GormSelectQuery) PreloadRelation(relation string, apply ...func(common.
|
||||
|
||||
wrapper := &GormSelectQuery{
|
||||
db: db,
|
||||
driverName: g.driverName,
|
||||
}
|
||||
|
||||
current := common.SelectQuery(wrapper)
|
||||
@@ -360,6 +385,7 @@ func (g *GormSelectQuery) JoinRelation(relation string, apply ...func(common.Sel
|
||||
|
||||
wrapper := &GormSelectQuery{
|
||||
db: db,
|
||||
driverName: g.driverName,
|
||||
inJoinContext: true, // Mark as JOIN context
|
||||
joinTableAlias: strings.ToLower(relation), // Use relation name as alias
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
@@ -17,11 +18,50 @@ import (
|
||||
// This provides a lightweight PostgreSQL adapter without ORM overhead
|
||||
type PgSQLAdapter struct {
|
||||
db *sql.DB
|
||||
dbMu sync.RWMutex
|
||||
dbFactory func() (*sql.DB, error)
|
||||
driverName string
|
||||
}
|
||||
|
||||
// NewPgSQLAdapter creates a new PostgreSQL adapter
|
||||
func NewPgSQLAdapter(db *sql.DB) *PgSQLAdapter {
|
||||
return &PgSQLAdapter{db: db}
|
||||
// NewPgSQLAdapter creates a new adapter wrapping a standard sql.DB.
|
||||
// An optional driverName (e.g. "postgres", "sqlite", "mssql") can be provided;
|
||||
// it defaults to "postgres" when omitted.
|
||||
func NewPgSQLAdapter(db *sql.DB, driverName ...string) *PgSQLAdapter {
|
||||
name := "postgres"
|
||||
if len(driverName) > 0 && driverName[0] != "" {
|
||||
name = driverName[0]
|
||||
}
|
||||
return &PgSQLAdapter{db: db, driverName: name}
|
||||
}
|
||||
|
||||
// WithDBFactory configures a factory used to reopen the database connection if it is closed.
|
||||
func (p *PgSQLAdapter) WithDBFactory(factory func() (*sql.DB, error)) *PgSQLAdapter {
|
||||
p.dbFactory = factory
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PgSQLAdapter) getDB() *sql.DB {
|
||||
p.dbMu.RLock()
|
||||
defer p.dbMu.RUnlock()
|
||||
return p.db
|
||||
}
|
||||
|
||||
func (p *PgSQLAdapter) reconnectDB() error {
|
||||
if p.dbFactory == nil {
|
||||
return fmt.Errorf("no db factory configured for reconnect")
|
||||
}
|
||||
newDB, err := p.dbFactory()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.dbMu.Lock()
|
||||
p.db = newDB
|
||||
p.dbMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func isDBClosed(err error) bool {
|
||||
return err != nil && strings.Contains(err.Error(), "sql: database is closed")
|
||||
}
|
||||
|
||||
// EnableQueryDebug enables query debugging for development
|
||||
@@ -31,7 +71,8 @@ func (p *PgSQLAdapter) EnableQueryDebug() {
|
||||
|
||||
func (p *PgSQLAdapter) NewSelect() common.SelectQuery {
|
||||
return &PgSQLSelectQuery{
|
||||
db: p.db,
|
||||
db: p.getDB(),
|
||||
driverName: p.driverName,
|
||||
columns: []string{"*"},
|
||||
args: make([]interface{}, 0),
|
||||
}
|
||||
@@ -39,14 +80,16 @@ func (p *PgSQLAdapter) NewSelect() common.SelectQuery {
|
||||
|
||||
func (p *PgSQLAdapter) NewInsert() common.InsertQuery {
|
||||
return &PgSQLInsertQuery{
|
||||
db: p.db,
|
||||
db: p.getDB(),
|
||||
driverName: p.driverName,
|
||||
values: make(map[string]interface{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PgSQLAdapter) NewUpdate() common.UpdateQuery {
|
||||
return &PgSQLUpdateQuery{
|
||||
db: p.db,
|
||||
db: p.getDB(),
|
||||
driverName: p.driverName,
|
||||
sets: make(map[string]interface{}),
|
||||
args: make([]interface{}, 0),
|
||||
whereClauses: make([]string, 0),
|
||||
@@ -55,7 +98,8 @@ func (p *PgSQLAdapter) NewUpdate() common.UpdateQuery {
|
||||
|
||||
func (p *PgSQLAdapter) NewDelete() common.DeleteQuery {
|
||||
return &PgSQLDeleteQuery{
|
||||
db: p.db,
|
||||
db: p.getDB(),
|
||||
driverName: p.driverName,
|
||||
args: make([]interface{}, 0),
|
||||
whereClauses: make([]string, 0),
|
||||
}
|
||||
@@ -68,7 +112,14 @@ func (p *PgSQLAdapter) Exec(ctx context.Context, query string, args ...interface
|
||||
}
|
||||
}()
|
||||
logger.Debug("PgSQL Exec: %s [args: %v]", query, args)
|
||||
result, err := p.db.ExecContext(ctx, query, args...)
|
||||
var result sql.Result
|
||||
run := func() error { var e error; result, e = p.getDB().ExecContext(ctx, query, args...); return e }
|
||||
err = run()
|
||||
if isDBClosed(err) {
|
||||
if reconnErr := p.reconnectDB(); reconnErr == nil {
|
||||
err = run()
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
logger.Error("PgSQL Exec failed: %v", err)
|
||||
return nil, err
|
||||
@@ -83,7 +134,14 @@ func (p *PgSQLAdapter) Query(ctx context.Context, dest interface{}, query string
|
||||
}
|
||||
}()
|
||||
logger.Debug("PgSQL Query: %s [args: %v]", query, args)
|
||||
rows, err := p.db.QueryContext(ctx, query, args...)
|
||||
var rows *sql.Rows
|
||||
run := func() error { var e error; rows, e = p.getDB().QueryContext(ctx, query, args...); return e }
|
||||
err = run()
|
||||
if isDBClosed(err) {
|
||||
if reconnErr := p.reconnectDB(); reconnErr == nil {
|
||||
err = run()
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
logger.Error("PgSQL Query failed: %v", err)
|
||||
return err
|
||||
@@ -94,11 +152,11 @@ func (p *PgSQLAdapter) Query(ctx context.Context, dest interface{}, query string
|
||||
}
|
||||
|
||||
func (p *PgSQLAdapter) BeginTx(ctx context.Context) (common.Database, error) {
|
||||
tx, err := p.db.BeginTx(ctx, nil)
|
||||
tx, err := p.getDB().BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &PgSQLTxAdapter{tx: tx}, nil
|
||||
return &PgSQLTxAdapter{tx: tx, driverName: p.driverName}, nil
|
||||
}
|
||||
|
||||
func (p *PgSQLAdapter) CommitTx(ctx context.Context) error {
|
||||
@@ -116,12 +174,12 @@ func (p *PgSQLAdapter) RunInTransaction(ctx context.Context, fn func(common.Data
|
||||
}
|
||||
}()
|
||||
|
||||
tx, err := p.db.BeginTx(ctx, nil)
|
||||
tx, err := p.getDB().BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
adapter := &PgSQLTxAdapter{tx: tx}
|
||||
adapter := &PgSQLTxAdapter{tx: tx, driverName: p.driverName}
|
||||
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
@@ -141,6 +199,10 @@ func (p *PgSQLAdapter) GetUnderlyingDB() interface{} {
|
||||
return p.db
|
||||
}
|
||||
|
||||
func (p *PgSQLAdapter) DriverName() string {
|
||||
return p.driverName
|
||||
}
|
||||
|
||||
// preloadConfig represents a relationship to be preloaded
|
||||
type preloadConfig struct {
|
||||
relation string
|
||||
@@ -165,6 +227,7 @@ type PgSQLSelectQuery struct {
|
||||
model interface{}
|
||||
tableName string
|
||||
tableAlias string
|
||||
driverName string // Database driver name (postgres, sqlite, mssql)
|
||||
columns []string
|
||||
columnExprs []string
|
||||
whereClauses []string
|
||||
@@ -183,7 +246,9 @@ type PgSQLSelectQuery struct {
|
||||
func (p *PgSQLSelectQuery) Model(model interface{}) common.SelectQuery {
|
||||
p.model = model
|
||||
if provider, ok := model.(common.TableNameProvider); ok {
|
||||
p.tableName = provider.TableName()
|
||||
fullTableName := provider.TableName()
|
||||
// For SQLite, convert "schema.table" to "schema_table"
|
||||
_, p.tableName = parseTableName(fullTableName, p.driverName)
|
||||
}
|
||||
if provider, ok := model.(common.TableAliasProvider); ok {
|
||||
p.tableAlias = provider.TableAlias()
|
||||
@@ -192,7 +257,8 @@ func (p *PgSQLSelectQuery) Model(model interface{}) common.SelectQuery {
|
||||
}
|
||||
|
||||
func (p *PgSQLSelectQuery) Table(table string) common.SelectQuery {
|
||||
p.tableName = table
|
||||
// For SQLite, convert "schema.table" to "schema_table"
|
||||
_, p.tableName = parseTableName(table, p.driverName)
|
||||
return p
|
||||
}
|
||||
|
||||
@@ -375,12 +441,12 @@ func (p *PgSQLSelectQuery) buildSQL() string {
|
||||
|
||||
// LIMIT clause
|
||||
if p.limit > 0 {
|
||||
sb.WriteString(fmt.Sprintf(" LIMIT %d", p.limit))
|
||||
fmt.Fprintf(&sb, " LIMIT %d", p.limit)
|
||||
}
|
||||
|
||||
// OFFSET clause
|
||||
if p.offset > 0 {
|
||||
sb.WriteString(fmt.Sprintf(" OFFSET %d", p.offset))
|
||||
fmt.Fprintf(&sb, " OFFSET %d", p.offset)
|
||||
}
|
||||
|
||||
return sb.String()
|
||||
@@ -504,13 +570,16 @@ type PgSQLInsertQuery struct {
|
||||
db *sql.DB
|
||||
tx *sql.Tx
|
||||
tableName string
|
||||
driverName string
|
||||
values map[string]interface{}
|
||||
returning []string
|
||||
}
|
||||
|
||||
func (p *PgSQLInsertQuery) Model(model interface{}) common.InsertQuery {
|
||||
if provider, ok := model.(common.TableNameProvider); ok {
|
||||
p.tableName = provider.TableName()
|
||||
fullTableName := provider.TableName()
|
||||
// For SQLite, convert "schema.table" to "schema_table"
|
||||
_, p.tableName = parseTableName(fullTableName, p.driverName)
|
||||
}
|
||||
// Extract values from model using reflection
|
||||
// This is a simplified implementation
|
||||
@@ -518,7 +587,8 @@ func (p *PgSQLInsertQuery) Model(model interface{}) common.InsertQuery {
|
||||
}
|
||||
|
||||
func (p *PgSQLInsertQuery) Table(table string) common.InsertQuery {
|
||||
p.tableName = table
|
||||
// For SQLite, convert "schema.table" to "schema_table"
|
||||
_, p.tableName = parseTableName(table, p.driverName)
|
||||
return p
|
||||
}
|
||||
|
||||
@@ -591,6 +661,7 @@ type PgSQLUpdateQuery struct {
|
||||
db *sql.DB
|
||||
tx *sql.Tx
|
||||
tableName string
|
||||
driverName string
|
||||
model interface{}
|
||||
sets map[string]interface{}
|
||||
whereClauses []string
|
||||
@@ -602,13 +673,16 @@ type PgSQLUpdateQuery struct {
|
||||
func (p *PgSQLUpdateQuery) Model(model interface{}) common.UpdateQuery {
|
||||
p.model = model
|
||||
if provider, ok := model.(common.TableNameProvider); ok {
|
||||
p.tableName = provider.TableName()
|
||||
fullTableName := provider.TableName()
|
||||
// For SQLite, convert "schema.table" to "schema_table"
|
||||
_, p.tableName = parseTableName(fullTableName, p.driverName)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PgSQLUpdateQuery) Table(table string) common.UpdateQuery {
|
||||
p.tableName = table
|
||||
// For SQLite, convert "schema.table" to "schema_table"
|
||||
_, p.tableName = parseTableName(table, p.driverName)
|
||||
if p.model == nil {
|
||||
model, err := modelregistry.GetModelByName(table)
|
||||
if err == nil {
|
||||
@@ -749,6 +823,7 @@ type PgSQLDeleteQuery struct {
|
||||
db *sql.DB
|
||||
tx *sql.Tx
|
||||
tableName string
|
||||
driverName string
|
||||
whereClauses []string
|
||||
args []interface{}
|
||||
paramCounter int
|
||||
@@ -756,13 +831,16 @@ type PgSQLDeleteQuery struct {
|
||||
|
||||
func (p *PgSQLDeleteQuery) Model(model interface{}) common.DeleteQuery {
|
||||
if provider, ok := model.(common.TableNameProvider); ok {
|
||||
p.tableName = provider.TableName()
|
||||
fullTableName := provider.TableName()
|
||||
// For SQLite, convert "schema.table" to "schema_table"
|
||||
_, p.tableName = parseTableName(fullTableName, p.driverName)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PgSQLDeleteQuery) Table(table string) common.DeleteQuery {
|
||||
p.tableName = table
|
||||
// For SQLite, convert "schema.table" to "schema_table"
|
||||
_, p.tableName = parseTableName(table, p.driverName)
|
||||
return p
|
||||
}
|
||||
|
||||
@@ -836,11 +914,13 @@ func (p *PgSQLResult) LastInsertId() (int64, error) {
|
||||
// PgSQLTxAdapter wraps a PostgreSQL transaction
|
||||
type PgSQLTxAdapter struct {
|
||||
tx *sql.Tx
|
||||
driverName string
|
||||
}
|
||||
|
||||
func (p *PgSQLTxAdapter) NewSelect() common.SelectQuery {
|
||||
return &PgSQLSelectQuery{
|
||||
tx: p.tx,
|
||||
driverName: p.driverName,
|
||||
columns: []string{"*"},
|
||||
args: make([]interface{}, 0),
|
||||
}
|
||||
@@ -849,6 +929,7 @@ func (p *PgSQLTxAdapter) NewSelect() common.SelectQuery {
|
||||
func (p *PgSQLTxAdapter) NewInsert() common.InsertQuery {
|
||||
return &PgSQLInsertQuery{
|
||||
tx: p.tx,
|
||||
driverName: p.driverName,
|
||||
values: make(map[string]interface{}),
|
||||
}
|
||||
}
|
||||
@@ -856,6 +937,7 @@ func (p *PgSQLTxAdapter) NewInsert() common.InsertQuery {
|
||||
func (p *PgSQLTxAdapter) NewUpdate() common.UpdateQuery {
|
||||
return &PgSQLUpdateQuery{
|
||||
tx: p.tx,
|
||||
driverName: p.driverName,
|
||||
sets: make(map[string]interface{}),
|
||||
args: make([]interface{}, 0),
|
||||
whereClauses: make([]string, 0),
|
||||
@@ -865,6 +947,7 @@ func (p *PgSQLTxAdapter) NewUpdate() common.UpdateQuery {
|
||||
func (p *PgSQLTxAdapter) NewDelete() common.DeleteQuery {
|
||||
return &PgSQLDeleteQuery{
|
||||
tx: p.tx,
|
||||
driverName: p.driverName,
|
||||
args: make([]interface{}, 0),
|
||||
whereClauses: make([]string, 0),
|
||||
}
|
||||
@@ -912,6 +995,10 @@ func (p *PgSQLTxAdapter) GetUnderlyingDB() interface{} {
|
||||
return p.tx
|
||||
}
|
||||
|
||||
func (p *PgSQLTxAdapter) DriverName() string {
|
||||
return p.driverName
|
||||
}
|
||||
|
||||
// applyJoinPreloads adds JOINs for relationships that should use JOIN strategy
|
||||
func (p *PgSQLSelectQuery) applyJoinPreloads() {
|
||||
for _, preload := range p.preloads {
|
||||
@@ -1036,9 +1123,9 @@ func (p *PgSQLSelectQuery) executePreloadQuery(ctx context.Context, field reflec
|
||||
// Create a new select query for the related table
|
||||
var db common.Database
|
||||
if p.tx != nil {
|
||||
db = &PgSQLTxAdapter{tx: p.tx}
|
||||
db = &PgSQLTxAdapter{tx: p.tx, driverName: p.driverName}
|
||||
} else {
|
||||
db = &PgSQLAdapter{db: p.db}
|
||||
db = &PgSQLAdapter{db: p.db, driverName: p.driverName}
|
||||
}
|
||||
|
||||
query := db.NewSelect().
|
||||
|
||||
@@ -11,15 +11,71 @@ import (
|
||||
"gorm.io/driver/sqlite"
|
||||
"gorm.io/driver/sqlserver"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
)
|
||||
|
||||
// PostgreSQL identifier length limit (63 bytes + null terminator = 64 bytes total)
|
||||
const postgresIdentifierLimit = 63
|
||||
|
||||
// checkAliasLength checks if a preload relation path will generate aliases that exceed PostgreSQL's limit
|
||||
// Returns true if the alias is likely to be truncated
|
||||
func checkAliasLength(relation string) bool {
|
||||
// Bun generates aliases like: parentalias__childalias__columnname
|
||||
// For nested preloads, it uses the pattern: relation1__relation2__relation3__columnname
|
||||
parts := strings.Split(relation, ".")
|
||||
if len(parts) <= 1 {
|
||||
return false // Single level relations are fine
|
||||
}
|
||||
|
||||
// Calculate the actual alias prefix length that Bun will generate
|
||||
// Bun uses double underscores (__) between each relation level
|
||||
// and converts the relation names to lowercase with underscores
|
||||
aliasPrefix := strings.ToLower(strings.Join(parts, "__"))
|
||||
aliasPrefixLen := len(aliasPrefix)
|
||||
|
||||
// We need to add 2 more underscores for the column name separator plus column name length
|
||||
// Column names in the error were things like "rid_mastertype_hubtype" (23 chars)
|
||||
// To be safe, assume the longest column name could be around 35 chars
|
||||
maxColumnNameLen := 35
|
||||
estimatedMaxLen := aliasPrefixLen + 2 + maxColumnNameLen
|
||||
|
||||
// Check if this would exceed PostgreSQL's identifier limit
|
||||
if estimatedMaxLen > postgresIdentifierLimit {
|
||||
logger.Warn("Preload relation '%s' will generate aliases up to %d chars (prefix: %d + column: %d), exceeding PostgreSQL's %d char limit",
|
||||
relation, estimatedMaxLen, aliasPrefixLen, maxColumnNameLen, postgresIdentifierLimit)
|
||||
return true
|
||||
}
|
||||
|
||||
// Also check if just the prefix is getting close (within 15 chars of limit)
|
||||
// This gives room for column names
|
||||
if aliasPrefixLen > (postgresIdentifierLimit - 15) {
|
||||
logger.Warn("Preload relation '%s' has alias prefix of %d chars, which may cause truncation with longer column names (limit: %d)",
|
||||
relation, aliasPrefixLen, postgresIdentifierLimit)
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// parseTableName splits a table name that may contain schema into separate schema and table
|
||||
// For example: "public.users" -> ("public", "users")
|
||||
//
|
||||
// "users" -> ("", "users")
|
||||
func parseTableName(fullTableName string) (schema, table string) {
|
||||
//
|
||||
// For SQLite, schema.table is translated to schema_table since SQLite doesn't support schemas
|
||||
// in the same way as PostgreSQL/MSSQL
|
||||
func parseTableName(fullTableName, driverName string) (schema, table string) {
|
||||
if idx := strings.LastIndex(fullTableName, "."); idx != -1 {
|
||||
return fullTableName[:idx], fullTableName[idx+1:]
|
||||
schema = fullTableName[:idx]
|
||||
table = fullTableName[idx+1:]
|
||||
|
||||
// For SQLite, convert schema.table to schema_table
|
||||
if driverName == "sqlite" || driverName == "sqlite3" {
|
||||
table = schema + "_" + table
|
||||
schema = ""
|
||||
}
|
||||
return schema, table
|
||||
}
|
||||
return "", fullTableName
|
||||
}
|
||||
|
||||
@@ -30,6 +30,12 @@ type Database interface {
|
||||
// For Bun, this returns *bun.DB
|
||||
// This is useful for provider-specific features like PostgreSQL NOTIFY/LISTEN
|
||||
GetUnderlyingDB() interface{}
|
||||
|
||||
// DriverName returns the canonical name of the underlying database driver.
|
||||
// Possible values: "postgres", "sqlite", "mssql", "mysql".
|
||||
// All adapters normalise vendor-specific strings (e.g. Bun's "pg", GORM's
|
||||
// "sqlserver") to the values above before returning.
|
||||
DriverName() string
|
||||
}
|
||||
|
||||
// SelectQuery interface for building SELECT queries (compatible with both GORM and Bun)
|
||||
|
||||
@@ -50,6 +50,9 @@ func (m *mockDatabase) RollbackTx(ctx context.Context) error {
|
||||
func (m *mockDatabase) GetUnderlyingDB() interface{} {
|
||||
return nil
|
||||
}
|
||||
func (m *mockDatabase) DriverName() string {
|
||||
return "postgres"
|
||||
}
|
||||
|
||||
// Mock SelectQuery
|
||||
type mockSelectQuery struct{}
|
||||
|
||||
@@ -2,6 +2,7 @@ package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
@@ -167,16 +168,17 @@ func SanitizeWhereClause(where string, tableName string, options ...*RequestOpti
|
||||
}
|
||||
|
||||
// Build a set of allowed table prefixes (main table + preloaded relations)
|
||||
// Keys are stored lowercase for case-insensitive matching
|
||||
allowedPrefixes := make(map[string]bool)
|
||||
if tableName != "" {
|
||||
allowedPrefixes[tableName] = true
|
||||
allowedPrefixes[strings.ToLower(tableName)] = true
|
||||
}
|
||||
|
||||
// Add preload relation names as allowed prefixes
|
||||
if len(options) > 0 && options[0] != nil {
|
||||
for pi := range options[0].Preload {
|
||||
if options[0].Preload[pi].Relation != "" {
|
||||
allowedPrefixes[options[0].Preload[pi].Relation] = true
|
||||
allowedPrefixes[strings.ToLower(options[0].Preload[pi].Relation)] = true
|
||||
logger.Debug("Added preload relation '%s' as allowed table prefix", options[0].Preload[pi].Relation)
|
||||
}
|
||||
}
|
||||
@@ -184,7 +186,7 @@ func SanitizeWhereClause(where string, tableName string, options ...*RequestOpti
|
||||
// Add join aliases as allowed prefixes
|
||||
for _, alias := range options[0].JoinAliases {
|
||||
if alias != "" {
|
||||
allowedPrefixes[alias] = true
|
||||
allowedPrefixes[strings.ToLower(alias)] = true
|
||||
logger.Debug("Added join alias '%s' as allowed table prefix", alias)
|
||||
}
|
||||
}
|
||||
@@ -216,8 +218,8 @@ func SanitizeWhereClause(where string, tableName string, options ...*RequestOpti
|
||||
currentPrefix, columnName := extractTableAndColumn(condToCheck)
|
||||
|
||||
if currentPrefix != "" && columnName != "" {
|
||||
// Check if the prefix is allowed (main table or preload relation)
|
||||
if !allowedPrefixes[currentPrefix] {
|
||||
// Check if the prefix is allowed (main table or preload relation) - case-insensitive
|
||||
if !allowedPrefixes[strings.ToLower(currentPrefix)] {
|
||||
// Prefix is not in the allowed list - only fix if it's a valid column in the main table
|
||||
if validColumns == nil || isValidColumn(columnName, validColumns) {
|
||||
// Replace the incorrect prefix with the correct main table name
|
||||
@@ -925,3 +927,36 @@ func extractLeftSideOfComparison(cond string) string {
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// FilterValueToSlice converts a filter value to []interface{} for use with IN operators.
|
||||
// JSON-decoded arrays arrive as []interface{}, but typed slices (e.g. []string) also work.
|
||||
// Returns a single-element slice if the value is not a slice type.
|
||||
func FilterValueToSlice(v interface{}) []interface{} {
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Kind() == reflect.Slice {
|
||||
result := make([]interface{}, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
result[i] = rv.Index(i).Interface()
|
||||
}
|
||||
return result
|
||||
}
|
||||
return []interface{}{v}
|
||||
}
|
||||
|
||||
// BuildInCondition builds a parameterized IN condition from a filter value.
|
||||
// Returns the condition string (e.g. "col IN (?,?)") and the individual values as args.
|
||||
// Returns ("", nil) if the value is empty or not a slice.
|
||||
func BuildInCondition(column string, v interface{}) (query string, args []interface{}) {
|
||||
values := FilterValueToSlice(v)
|
||||
if len(values) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
placeholders := make([]string, len(values))
|
||||
for i := range values {
|
||||
placeholders[i] = "?"
|
||||
}
|
||||
return fmt.Sprintf("%s IN (%s)", column, strings.Join(placeholders, ",")), values
|
||||
}
|
||||
|
||||
103
pkg/common/sql_helpers_tablename_test.go
Normal file
103
pkg/common/sql_helpers_tablename_test.go
Normal file
@@ -0,0 +1,103 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestSanitizeWhereClause_WithTableName tests that table prefixes in WHERE clauses
|
||||
// are correctly handled when the tableName parameter matches the prefix
|
||||
func TestSanitizeWhereClause_WithTableName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
where string
|
||||
tableName string
|
||||
options *RequestOptions
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "Correct table prefix should not be changed",
|
||||
where: "mastertaskitem.rid_parentmastertaskitem is null",
|
||||
tableName: "mastertaskitem",
|
||||
options: nil,
|
||||
expected: "mastertaskitem.rid_parentmastertaskitem is null",
|
||||
},
|
||||
{
|
||||
name: "Wrong table prefix should be fixed",
|
||||
where: "wrong_table.rid_parentmastertaskitem is null",
|
||||
tableName: "mastertaskitem",
|
||||
options: nil,
|
||||
expected: "mastertaskitem.rid_parentmastertaskitem is null",
|
||||
},
|
||||
{
|
||||
name: "Relation name should not replace correct table prefix",
|
||||
where: "mastertaskitem.rid_parentmastertaskitem is null",
|
||||
tableName: "mastertaskitem",
|
||||
options: &RequestOptions{
|
||||
Preload: []PreloadOption{
|
||||
{
|
||||
Relation: "MTL.MAL.MAL_RID_PARENTMASTERTASKITEM",
|
||||
TableName: "mastertaskitem",
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: "mastertaskitem.rid_parentmastertaskitem is null",
|
||||
},
|
||||
{
|
||||
name: "Unqualified column should remain unqualified",
|
||||
where: "rid_parentmastertaskitem is null",
|
||||
tableName: "mastertaskitem",
|
||||
options: nil,
|
||||
expected: "rid_parentmastertaskitem is null",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := SanitizeWhereClause(tt.where, tt.tableName, tt.options)
|
||||
if result != tt.expected {
|
||||
t.Errorf("SanitizeWhereClause(%q, %q) = %q, want %q",
|
||||
tt.where, tt.tableName, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAddTablePrefixToColumns_WithTableName tests that table prefixes
|
||||
// are correctly added to unqualified columns
|
||||
func TestAddTablePrefixToColumns_WithTableName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
where string
|
||||
tableName string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "Add prefix to unqualified column",
|
||||
where: "rid_parentmastertaskitem is null",
|
||||
tableName: "mastertaskitem",
|
||||
expected: "mastertaskitem.rid_parentmastertaskitem is null",
|
||||
},
|
||||
{
|
||||
name: "Don't change already qualified column",
|
||||
where: "mastertaskitem.rid_parentmastertaskitem is null",
|
||||
tableName: "mastertaskitem",
|
||||
expected: "mastertaskitem.rid_parentmastertaskitem is null",
|
||||
},
|
||||
{
|
||||
name: "Don't change qualified column with different table",
|
||||
where: "other_table.rid_something is null",
|
||||
tableName: "mastertaskitem",
|
||||
expected: "other_table.rid_something is null",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := AddTablePrefixToColumns(tt.where, tt.tableName)
|
||||
if result != tt.expected {
|
||||
t.Errorf("AddTablePrefixToColumns(%q, %q) = %q, want %q",
|
||||
tt.where, tt.tableName, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -37,6 +37,7 @@ type Parameter struct {
|
||||
|
||||
type PreloadOption struct {
|
||||
Relation string `json:"relation"`
|
||||
TableName string `json:"table_name"` // Actual database table name (e.g., "mastertaskitem")
|
||||
Columns []string `json:"columns"`
|
||||
OmitColumns []string `json:"omit_columns"`
|
||||
Sort []SortOption `json:"sort"`
|
||||
@@ -52,6 +53,11 @@ type PreloadOption struct {
|
||||
PrimaryKey string `json:"primary_key"` // Primary key of the related table
|
||||
RelatedKey string `json:"related_key"` // For child tables: column in child that references parent
|
||||
ForeignKey string `json:"foreign_key"` // For parent tables: column in current table that references parent
|
||||
RecursiveChildKey string `json:"recursive_child_key"` // For recursive tables: FK column used for recursion (e.g., "rid_parentmastertaskitem")
|
||||
|
||||
// Custom SQL JOINs from XFiles - used when preload needs additional joins
|
||||
SqlJoins []string `json:"sql_joins"` // Custom SQL JOIN clauses
|
||||
JoinAliases []string `json:"join_aliases"` // Extracted table aliases from SqlJoins for validation
|
||||
}
|
||||
|
||||
type FilterOption struct {
|
||||
|
||||
@@ -272,15 +272,31 @@ func (v *ColumnValidator) FilterRequestOptions(options RequestOptions) RequestOp
|
||||
filteredPreload.Columns = v.FilterValidColumns(preload.Columns)
|
||||
filteredPreload.OmitColumns = v.FilterValidColumns(preload.OmitColumns)
|
||||
|
||||
// Preserve SqlJoins and JoinAliases for preloads with custom joins
|
||||
filteredPreload.SqlJoins = preload.SqlJoins
|
||||
filteredPreload.JoinAliases = preload.JoinAliases
|
||||
|
||||
// Filter preload filters
|
||||
validPreloadFilters := make([]FilterOption, 0, len(preload.Filters))
|
||||
for _, filter := range preload.Filters {
|
||||
if v.IsValidColumn(filter.Column) {
|
||||
validPreloadFilters = append(validPreloadFilters, filter)
|
||||
} else {
|
||||
// Check if the filter column references a joined table alias
|
||||
foundJoin := false
|
||||
for _, alias := range preload.JoinAliases {
|
||||
if strings.Contains(filter.Column, alias) {
|
||||
foundJoin = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if foundJoin {
|
||||
validPreloadFilters = append(validPreloadFilters, filter)
|
||||
} else {
|
||||
logger.Warn("Invalid column in preload '%s' filter '%s' removed", preload.Relation, filter.Column)
|
||||
}
|
||||
}
|
||||
}
|
||||
filteredPreload.Filters = validPreloadFilters
|
||||
|
||||
// Filter preload sort columns
|
||||
|
||||
@@ -11,6 +11,7 @@ A comprehensive database connection manager for Go that provides centralized man
|
||||
- **GORM** - Popular Go ORM
|
||||
- **Native** - Standard library `*sql.DB`
|
||||
- All three share the same underlying connection pool
|
||||
- **SQLite Schema Translation**: Automatic conversion of `schema.table` to `schema_table` for SQLite compatibility
|
||||
- **Configuration-Driven**: YAML configuration with Viper integration
|
||||
- **Production-Ready Features**:
|
||||
- Automatic health checks and reconnection
|
||||
@@ -179,6 +180,35 @@ if err != nil {
|
||||
rows, err := nativeDB.QueryContext(ctx, "SELECT * FROM users WHERE active = $1", true)
|
||||
```
|
||||
|
||||
#### Cross-Database Example with SQLite
|
||||
|
||||
```go
|
||||
// Same model works across all databases
|
||||
type User struct {
|
||||
ID int `bun:"id,pk"`
|
||||
Username string `bun:"username"`
|
||||
Email string `bun:"email"`
|
||||
}
|
||||
|
||||
func (User) TableName() string {
|
||||
return "auth.users"
|
||||
}
|
||||
|
||||
// PostgreSQL connection
|
||||
pgConn, _ := mgr.Get("primary")
|
||||
pgDB, _ := pgConn.Bun()
|
||||
var pgUsers []User
|
||||
pgDB.NewSelect().Model(&pgUsers).Scan(ctx)
|
||||
// Executes: SELECT * FROM auth.users
|
||||
|
||||
// SQLite connection
|
||||
sqliteConn, _ := mgr.Get("cache-db")
|
||||
sqliteDB, _ := sqliteConn.Bun()
|
||||
var sqliteUsers []User
|
||||
sqliteDB.NewSelect().Model(&sqliteUsers).Scan(ctx)
|
||||
// Executes: SELECT * FROM auth_users (schema.table → schema_table)
|
||||
```
|
||||
|
||||
#### Use MongoDB
|
||||
|
||||
```go
|
||||
@@ -368,6 +398,37 @@ Providers handle:
|
||||
- Connection statistics
|
||||
- Connection cleanup
|
||||
|
||||
### SQLite Schema Handling
|
||||
|
||||
SQLite doesn't support schemas in the same way as PostgreSQL or MSSQL. To ensure compatibility when using models designed for multi-schema databases:
|
||||
|
||||
**Automatic Translation**: When a table name contains a schema prefix (e.g., `myschema.mytable`), it is automatically converted to `myschema_mytable` for SQLite databases.
|
||||
|
||||
```go
|
||||
// Model definition (works across all databases)
|
||||
func (User) TableName() string {
|
||||
return "auth.users" // PostgreSQL/MSSQL: "auth"."users"
|
||||
// SQLite: "auth_users"
|
||||
}
|
||||
|
||||
// Query execution
|
||||
db.NewSelect().Model(&User{}).Scan(ctx)
|
||||
// PostgreSQL/MSSQL: SELECT * FROM auth.users
|
||||
// SQLite: SELECT * FROM auth_users
|
||||
```
|
||||
|
||||
**How it Works**:
|
||||
- Bun, GORM, and Native adapters detect the driver type
|
||||
- `parseTableName()` automatically translates schema.table → schema_table for SQLite
|
||||
- Translation happens transparently in all database operations (SELECT, INSERT, UPDATE, DELETE)
|
||||
- Preload and relation queries are also handled automatically
|
||||
|
||||
**Benefits**:
|
||||
- Write database-agnostic code
|
||||
- Use the same models across PostgreSQL, MSSQL, and SQLite
|
||||
- No conditional logic needed in your application
|
||||
- Schema separation maintained through naming convention in SQLite
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Use Named Connections**: Be explicit about which database you're accessing
|
||||
|
||||
@@ -26,6 +26,7 @@ type Connection interface {
|
||||
Bun() (*bun.DB, error)
|
||||
GORM() (*gorm.DB, error)
|
||||
Native() (*sql.DB, error)
|
||||
DB() (*sql.DB, error)
|
||||
|
||||
// Common Database interface (for SQL databases)
|
||||
Database() (common.Database, error)
|
||||
@@ -224,6 +225,11 @@ func (c *sqlConnection) Native() (*sql.DB, error) {
|
||||
return c.nativeDB, nil
|
||||
}
|
||||
|
||||
// DB returns the underlying *sql.DB connection
|
||||
func (c *sqlConnection) DB() (*sql.DB, error) {
|
||||
return c.Native()
|
||||
}
|
||||
|
||||
// Bun returns a Bun ORM instance wrapping the native connection
|
||||
func (c *sqlConnection) Bun() (*bun.DB, error) {
|
||||
if c == nil {
|
||||
@@ -467,13 +473,11 @@ func (c *sqlConnection) getNativeAdapter() (common.Database, error) {
|
||||
// Create a native adapter based on database type
|
||||
switch c.dbType {
|
||||
case DatabaseTypePostgreSQL:
|
||||
c.nativeAdapter = database.NewPgSQLAdapter(c.nativeDB)
|
||||
c.nativeAdapter = database.NewPgSQLAdapter(c.nativeDB, string(c.dbType))
|
||||
case DatabaseTypeSQLite:
|
||||
// For SQLite, we'll use the PgSQL adapter as it works with standard sql.DB
|
||||
c.nativeAdapter = database.NewPgSQLAdapter(c.nativeDB)
|
||||
c.nativeAdapter = database.NewPgSQLAdapter(c.nativeDB, string(c.dbType))
|
||||
case DatabaseTypeMSSQL:
|
||||
// For MSSQL, we'll use the PgSQL adapter as it works with standard sql.DB
|
||||
c.nativeAdapter = database.NewPgSQLAdapter(c.nativeDB)
|
||||
c.nativeAdapter = database.NewPgSQLAdapter(c.nativeDB, string(c.dbType))
|
||||
default:
|
||||
return nil, ErrUnsupportedDatabase
|
||||
}
|
||||
@@ -647,6 +651,11 @@ func (c *mongoConnection) Native() (*sql.DB, error) {
|
||||
return nil, ErrNotSQLDatabase
|
||||
}
|
||||
|
||||
// DB returns an error for MongoDB connections
|
||||
func (c *mongoConnection) DB() (*sql.DB, error) {
|
||||
return nil, ErrNotSQLDatabase
|
||||
}
|
||||
|
||||
// Database returns an error for MongoDB connections
|
||||
func (c *mongoConnection) Database() (common.Database, error) {
|
||||
return nil, ErrNotSQLDatabase
|
||||
|
||||
@@ -231,12 +231,14 @@ func (m *connectionManager) Connect(ctx context.Context) error {
|
||||
|
||||
// Close closes all database connections
|
||||
func (m *connectionManager) Close() error {
|
||||
// Stop the health checker before taking mu. performHealthCheck acquires
|
||||
// a read lock, so waiting for the goroutine while holding the write lock
|
||||
// would deadlock.
|
||||
m.stopHealthChecker()
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
// Stop health checker
|
||||
m.stopHealthChecker()
|
||||
|
||||
// Close all connections
|
||||
var errors []error
|
||||
for name, conn := range m.connections {
|
||||
|
||||
@@ -4,11 +4,17 @@ import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
)
|
||||
|
||||
// isDBClosed reports whether err indicates the *sql.DB has been closed.
|
||||
func isDBClosed(err error) bool {
|
||||
return err != nil && strings.Contains(err.Error(), "sql: database is closed")
|
||||
}
|
||||
|
||||
// Common errors
|
||||
var (
|
||||
// ErrNotSQLDatabase is returned when attempting SQL operations on a non-SQL database
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
_ "github.com/glebarez/sqlite" // Pure Go SQLite driver
|
||||
@@ -15,6 +16,8 @@ import (
|
||||
// SQLiteProvider implements Provider for SQLite databases
|
||||
type SQLiteProvider struct {
|
||||
db *sql.DB
|
||||
dbMu sync.RWMutex
|
||||
dbFactory func() (*sql.DB, error)
|
||||
config ConnectionConfig
|
||||
}
|
||||
|
||||
@@ -129,7 +132,13 @@ func (p *SQLiteProvider) HealthCheck(ctx context.Context) error {
|
||||
|
||||
// Execute a simple query to verify the database is accessible
|
||||
var result int
|
||||
err := p.db.QueryRowContext(healthCtx, "SELECT 1").Scan(&result)
|
||||
run := func() error { return p.getDB().QueryRowContext(healthCtx, "SELECT 1").Scan(&result) }
|
||||
err := run()
|
||||
if isDBClosed(err) {
|
||||
if reconnErr := p.reconnectDB(); reconnErr == nil {
|
||||
err = run()
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("health check failed: %w", err)
|
||||
}
|
||||
@@ -141,6 +150,32 @@ func (p *SQLiteProvider) HealthCheck(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithDBFactory configures a factory used to reopen the database connection if it is closed.
|
||||
func (p *SQLiteProvider) WithDBFactory(factory func() (*sql.DB, error)) *SQLiteProvider {
|
||||
p.dbFactory = factory
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *SQLiteProvider) getDB() *sql.DB {
|
||||
p.dbMu.RLock()
|
||||
defer p.dbMu.RUnlock()
|
||||
return p.db
|
||||
}
|
||||
|
||||
func (p *SQLiteProvider) reconnectDB() error {
|
||||
if p.dbFactory == nil {
|
||||
return fmt.Errorf("no db factory configured for reconnect")
|
||||
}
|
||||
newDB, err := p.dbFactory()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.dbMu.Lock()
|
||||
p.db = newDB
|
||||
p.dbMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetNative returns the native *sql.DB connection
|
||||
func (p *SQLiteProvider) GetNative() (*sql.DB, error) {
|
||||
if p.db == nil {
|
||||
|
||||
@@ -74,6 +74,10 @@ func (m *MockDatabase) GetUnderlyingDB() interface{} {
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *MockDatabase) DriverName() string {
|
||||
return "postgres"
|
||||
}
|
||||
|
||||
// MockResult implements common.Result interface for testing
|
||||
type MockResult struct {
|
||||
rows int64
|
||||
|
||||
@@ -2,14 +2,38 @@ package funcspec
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/security"
|
||||
)
|
||||
|
||||
// RegisterSecurityHooks registers security hooks for funcspec handlers
|
||||
// Note: funcspec operates on SQL queries directly, so row-level security is not directly applicable
|
||||
// We provide audit logging for data access tracking
|
||||
// We provide auth enforcement and audit logging for data access tracking
|
||||
func RegisterSecurityHooks(handler *Handler, securityList *security.SecurityList) {
|
||||
// Hook 0: BeforeQueryList - Auth check before list query execution
|
||||
handler.Hooks().Register(BeforeQueryList, func(hookCtx *HookContext) error {
|
||||
if hookCtx.UserContext == nil || hookCtx.UserContext.UserID == 0 {
|
||||
hookCtx.Abort = true
|
||||
hookCtx.AbortMessage = "authentication required"
|
||||
hookCtx.AbortCode = http.StatusUnauthorized
|
||||
return fmt.Errorf("authentication required")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// Hook 0: BeforeQuery - Auth check before single query execution
|
||||
handler.Hooks().Register(BeforeQuery, func(hookCtx *HookContext) error {
|
||||
if hookCtx.UserContext == nil || hookCtx.UserContext.UserID == 0 {
|
||||
hookCtx.Abort = true
|
||||
hookCtx.AbortMessage = "authentication required"
|
||||
hookCtx.AbortCode = http.StatusUnauthorized
|
||||
return fmt.Errorf("authentication required")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// Hook 1: BeforeQueryList - Audit logging before query list execution
|
||||
handler.Hooks().Register(BeforeQueryList, func(hookCtx *HookContext) error {
|
||||
secCtx := newFuncSpecSecurityContext(hookCtx)
|
||||
|
||||
@@ -8,6 +8,10 @@ import (
|
||||
|
||||
// ModelRules defines the permissions and security settings for a model
|
||||
type ModelRules struct {
|
||||
CanPublicRead bool // Whether the model can be read (GET operations)
|
||||
CanPublicUpdate bool // Whether the model can be updated (PUT/PATCH operations)
|
||||
CanPublicCreate bool // Whether the model can be created (POST operations)
|
||||
CanPublicDelete bool // Whether the model can be deleted (DELETE operations)
|
||||
CanRead bool // Whether the model can be read (GET operations)
|
||||
CanUpdate bool // Whether the model can be updated (PUT/PATCH operations)
|
||||
CanCreate bool // Whether the model can be created (POST operations)
|
||||
@@ -22,6 +26,10 @@ func DefaultModelRules() ModelRules {
|
||||
CanUpdate: true,
|
||||
CanCreate: true,
|
||||
CanDelete: true,
|
||||
CanPublicRead: false,
|
||||
CanPublicUpdate: false,
|
||||
CanPublicCreate: false,
|
||||
CanPublicDelete: false,
|
||||
SecurityDisabled: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ MQTTSpec is an MQTT-based database query framework that enables real-time databa
|
||||
- **Full CRUD Operations**: Create, Read, Update, Delete with hooks
|
||||
- **Real-time Subscriptions**: Subscribe to entity changes with filtering
|
||||
- **Database Agnostic**: GORM and Bun ORM support
|
||||
- **Lifecycle Hooks**: 12 hooks for authentication, authorization, validation, and auditing
|
||||
- **Lifecycle Hooks**: 13 hooks for authentication, authorization, validation, and auditing
|
||||
- **Multi-tenancy Support**: Built-in tenant isolation via hooks
|
||||
- **Thread-safe**: Proper concurrency handling throughout
|
||||
|
||||
@@ -326,10 +326,11 @@ When any client creates/updates/deletes a user matching the subscription filters
|
||||
|
||||
## Lifecycle Hooks
|
||||
|
||||
MQTTSpec provides 12 lifecycle hooks for implementing cross-cutting concerns:
|
||||
MQTTSpec provides 13 lifecycle hooks for implementing cross-cutting concerns:
|
||||
|
||||
### Hook Types
|
||||
|
||||
- `BeforeHandle` — fires after model resolution, before operation dispatch (auth checks)
|
||||
- `BeforeConnect` / `AfterConnect` - Connection lifecycle
|
||||
- `BeforeDisconnect` / `AfterDisconnect` - Disconnection lifecycle
|
||||
- `BeforeRead` / `AfterRead` - Read operations
|
||||
@@ -339,6 +340,20 @@ MQTTSpec provides 12 lifecycle hooks for implementing cross-cutting concerns:
|
||||
- `BeforeSubscribe` / `AfterSubscribe` - Subscription creation
|
||||
- `BeforeUnsubscribe` / `AfterUnsubscribe` - Subscription removal
|
||||
|
||||
### Security Hooks (Recommended)
|
||||
|
||||
Use `RegisterSecurityHooks` for integrated auth with model-rule support:
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/security"
|
||||
|
||||
provider := security.NewCompositeSecurityProvider(auth, colSec, rowSec)
|
||||
securityList := security.NewSecurityList(provider)
|
||||
mqttspec.RegisterSecurityHooks(handler, securityList)
|
||||
// Registers BeforeHandle (model auth), BeforeRead (load rules),
|
||||
// AfterRead (column security + audit), BeforeUpdate, BeforeDelete
|
||||
```
|
||||
|
||||
### Authentication Example (JWT)
|
||||
|
||||
```go
|
||||
@@ -657,7 +672,7 @@ handler, err := mqttspec.NewHandlerWithGORM(db,
|
||||
| **Network Efficiency** | Better for unreliable networks | Better for low-latency |
|
||||
| **Best For** | IoT, mobile apps, distributed systems | Web applications, real-time dashboards |
|
||||
| **Message Protocol** | Same JSON structure | Same JSON structure |
|
||||
| **Hooks** | Same 12 hooks | Same 12 hooks |
|
||||
| **Hooks** | Same 13 hooks | Same 13 hooks |
|
||||
| **CRUD Operations** | Identical | Identical |
|
||||
| **Subscriptions** | Identical (via MQTT topics) | Identical (via app-level) |
|
||||
|
||||
|
||||
@@ -284,6 +284,15 @@ func (h *Handler) handleRequest(client *Client, msg *Message) {
|
||||
},
|
||||
}
|
||||
|
||||
// Execute BeforeHandle hook - auth check fires here, after model resolution
|
||||
hookCtx.Operation = string(msg.Operation)
|
||||
if err := h.hooks.Execute(BeforeHandle, hookCtx); err != nil {
|
||||
if hookCtx.Abort {
|
||||
h.sendError(client.ID, msg.ID, "unauthorized", hookCtx.AbortMessage)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Route to operation handler
|
||||
switch msg.Operation {
|
||||
case OperationRead:
|
||||
@@ -645,12 +654,15 @@ func (h *Handler) getNotifyTopic(clientID, subscriptionID string) string {
|
||||
// Database operation helpers (adapted from websocketspec)
|
||||
|
||||
func (h *Handler) getTableName(schema, entity string, model interface{}) string {
|
||||
// Use entity as table name
|
||||
tableName := entity
|
||||
|
||||
if schema != "" {
|
||||
if h.db.DriverName() == "sqlite" {
|
||||
tableName = schema + "_" + tableName
|
||||
} else {
|
||||
tableName = schema + "." + tableName
|
||||
}
|
||||
}
|
||||
return tableName
|
||||
}
|
||||
|
||||
|
||||
@@ -20,8 +20,11 @@ type (
|
||||
HookRegistry = websocketspec.HookRegistry
|
||||
)
|
||||
|
||||
// Hook type constants - all 12 lifecycle hooks
|
||||
// Hook type constants - all lifecycle hooks
|
||||
const (
|
||||
// BeforeHandle fires after model resolution, before operation dispatch
|
||||
BeforeHandle = websocketspec.BeforeHandle
|
||||
|
||||
// CRUD operation hooks
|
||||
BeforeRead = websocketspec.BeforeRead
|
||||
AfterRead = websocketspec.AfterRead
|
||||
|
||||
108
pkg/mqttspec/security_hooks.go
Normal file
108
pkg/mqttspec/security_hooks.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package mqttspec
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/security"
|
||||
)
|
||||
|
||||
// RegisterSecurityHooks registers all security-related hooks with the MQTT handler
|
||||
func RegisterSecurityHooks(handler *Handler, securityList *security.SecurityList) {
|
||||
// Hook 0: BeforeHandle - enforce auth after model resolution
|
||||
handler.Hooks().Register(BeforeHandle, func(hookCtx *HookContext) error {
|
||||
if err := security.CheckModelAuthAllowed(newSecurityContext(hookCtx), hookCtx.Operation); err != nil {
|
||||
hookCtx.Abort = true
|
||||
hookCtx.AbortMessage = err.Error()
|
||||
hookCtx.AbortCode = http.StatusUnauthorized
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// Hook 1: BeforeRead - Load security rules
|
||||
handler.Hooks().Register(BeforeRead, func(hookCtx *HookContext) error {
|
||||
secCtx := newSecurityContext(hookCtx)
|
||||
return security.LoadSecurityRules(secCtx, securityList)
|
||||
})
|
||||
|
||||
// Hook 2: AfterRead - Apply column-level security (masking)
|
||||
handler.Hooks().Register(AfterRead, func(hookCtx *HookContext) error {
|
||||
secCtx := newSecurityContext(hookCtx)
|
||||
return security.ApplyColumnSecurity(secCtx, securityList)
|
||||
})
|
||||
|
||||
// Hook 3 (Optional): Audit logging
|
||||
handler.Hooks().Register(AfterRead, func(hookCtx *HookContext) error {
|
||||
secCtx := newSecurityContext(hookCtx)
|
||||
return security.LogDataAccess(secCtx)
|
||||
})
|
||||
|
||||
// Hook 4: BeforeUpdate - enforce CanUpdate rule from context/registry
|
||||
handler.Hooks().Register(BeforeUpdate, func(hookCtx *HookContext) error {
|
||||
secCtx := newSecurityContext(hookCtx)
|
||||
return security.CheckModelUpdateAllowed(secCtx)
|
||||
})
|
||||
|
||||
// Hook 5: BeforeDelete - enforce CanDelete rule from context/registry
|
||||
handler.Hooks().Register(BeforeDelete, func(hookCtx *HookContext) error {
|
||||
secCtx := newSecurityContext(hookCtx)
|
||||
return security.CheckModelDeleteAllowed(secCtx)
|
||||
})
|
||||
|
||||
logger.Info("Security hooks registered for mqttspec handler")
|
||||
}
|
||||
|
||||
// securityContext adapts mqttspec.HookContext to security.SecurityContext interface
|
||||
type securityContext struct {
|
||||
ctx *HookContext
|
||||
}
|
||||
|
||||
func newSecurityContext(ctx *HookContext) security.SecurityContext {
|
||||
return &securityContext{ctx: ctx}
|
||||
}
|
||||
|
||||
func (s *securityContext) GetContext() context.Context {
|
||||
return s.ctx.Context
|
||||
}
|
||||
|
||||
func (s *securityContext) GetUserID() (int, bool) {
|
||||
return security.GetUserID(s.ctx.Context)
|
||||
}
|
||||
|
||||
func (s *securityContext) GetSchema() string {
|
||||
return s.ctx.Schema
|
||||
}
|
||||
|
||||
func (s *securityContext) GetEntity() string {
|
||||
return s.ctx.Entity
|
||||
}
|
||||
|
||||
func (s *securityContext) GetModel() interface{} {
|
||||
return s.ctx.Model
|
||||
}
|
||||
|
||||
// GetQuery retrieves a stored query from hook metadata
|
||||
func (s *securityContext) GetQuery() interface{} {
|
||||
if s.ctx.Metadata == nil {
|
||||
return nil
|
||||
}
|
||||
return s.ctx.Metadata["query"]
|
||||
}
|
||||
|
||||
// SetQuery stores the query in hook metadata
|
||||
func (s *securityContext) SetQuery(query interface{}) {
|
||||
if s.ctx.Metadata == nil {
|
||||
s.ctx.Metadata = make(map[string]interface{})
|
||||
}
|
||||
s.ctx.Metadata["query"] = query
|
||||
}
|
||||
|
||||
func (s *securityContext) GetResult() interface{} {
|
||||
return s.ctx.Result
|
||||
}
|
||||
|
||||
func (s *securityContext) SetResult(result interface{}) {
|
||||
s.ctx.Result = result
|
||||
}
|
||||
513
pkg/resolvemcp/README.md
Normal file
513
pkg/resolvemcp/README.md
Normal file
@@ -0,0 +1,513 @@
|
||||
# resolvemcp
|
||||
|
||||
Package `resolvemcp` exposes registered database models as **Model Context Protocol (MCP) tools and resources** over HTTP/SSE transport. It mirrors the `resolvespec` package patterns — same model registration API, same filter/sort/pagination/preload options, same lifecycle hook system.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/bitechdev/ResolveSpec/pkg/resolvemcp"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
// 1. Create a handler
|
||||
handler := resolvemcp.NewHandlerWithGORM(db, resolvemcp.Config{
|
||||
BaseURL: "http://localhost:8080",
|
||||
})
|
||||
|
||||
// 2. Register models
|
||||
handler.RegisterModel("public", "users", &User{})
|
||||
handler.RegisterModel("public", "orders", &Order{})
|
||||
|
||||
// 3. Mount routes
|
||||
r := mux.NewRouter()
|
||||
resolvemcp.SetupMuxRoutes(r, handler)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Config
|
||||
|
||||
```go
|
||||
type Config struct {
|
||||
// BaseURL is the public-facing base URL of the server (e.g. "http://localhost:8080").
|
||||
// Sent to MCP clients during the SSE handshake so they know where to POST messages.
|
||||
// If empty, it is detected from each incoming request using the Host header and
|
||||
// TLS state (X-Forwarded-Proto is honoured for reverse-proxy deployments).
|
||||
BaseURL string
|
||||
|
||||
// BasePath is the URL path prefix where MCP endpoints are mounted (e.g. "/mcp").
|
||||
// Required.
|
||||
BasePath string
|
||||
}
|
||||
```
|
||||
|
||||
## Handler Creation
|
||||
|
||||
| Function | Description |
|
||||
|---|---|
|
||||
| `NewHandlerWithGORM(db *gorm.DB, cfg Config) *Handler` | Backed by GORM |
|
||||
| `NewHandlerWithBun(db *bun.DB, cfg Config) *Handler` | Backed by Bun |
|
||||
| `NewHandlerWithDB(db common.Database, cfg Config) *Handler` | Backed by any `common.Database` |
|
||||
| `NewHandler(db common.Database, registry common.ModelRegistry, cfg Config) *Handler` | Full control over registry |
|
||||
|
||||
---
|
||||
|
||||
## Registering Models
|
||||
|
||||
```go
|
||||
handler.RegisterModel(schema, entity string, model interface{}) error
|
||||
```
|
||||
|
||||
- `schema` — database schema name (e.g. `"public"`), or empty string for no schema prefix.
|
||||
- `entity` — table/entity name (e.g. `"users"`).
|
||||
- `model` — a pointer to a struct (e.g. `&User{}`).
|
||||
|
||||
Each call immediately creates four MCP **tools** and one MCP **resource** for the model.
|
||||
|
||||
---
|
||||
|
||||
## HTTP / SSE Transport
|
||||
|
||||
The `*server.SSEServer` returned by any of the helpers below implements `http.Handler`, so it works with every Go HTTP framework.
|
||||
|
||||
`Config.BasePath` is required and used for all route registration.
|
||||
`Config.BaseURL` is optional — when empty it is detected from each request.
|
||||
|
||||
### Gorilla Mux
|
||||
|
||||
```go
|
||||
resolvemcp.SetupMuxRoutes(r, handler)
|
||||
```
|
||||
|
||||
Registers:
|
||||
|
||||
| Route | Method | Description |
|
||||
|---|---|---|
|
||||
| `{BasePath}/sse` | GET | SSE connection — clients subscribe here |
|
||||
| `{BasePath}/message` | POST | JSON-RPC — clients send requests here |
|
||||
| `{BasePath}/*` | any | Full SSE server (convenience prefix) |
|
||||
|
||||
### bunrouter
|
||||
|
||||
```go
|
||||
resolvemcp.SetupBunRouterRoutes(router, handler)
|
||||
```
|
||||
|
||||
Registers `GET {BasePath}/sse` and `POST {BasePath}/message` on the provided `*bunrouter.Router`.
|
||||
|
||||
### Gin (or any `http.Handler`-compatible framework)
|
||||
|
||||
Use `handler.SSEServer()` to get an `http.Handler` and wrap it with the framework's adapter:
|
||||
|
||||
```go
|
||||
sse := handler.SSEServer()
|
||||
|
||||
// Gin
|
||||
engine.Any("/mcp/*path", gin.WrapH(sse))
|
||||
|
||||
// net/http
|
||||
http.Handle("/mcp/", sse)
|
||||
|
||||
// Echo
|
||||
e.Any("/mcp/*", echo.WrapHandler(sse))
|
||||
```
|
||||
|
||||
### Authentication
|
||||
|
||||
Add middleware before the MCP routes. The handler itself has no auth layer.
|
||||
|
||||
---
|
||||
|
||||
## Security
|
||||
|
||||
`resolvemcp` integrates with the `security` package to provide per-entity access control, row-level security, and column-level security — the same system used by `resolvespec` and `restheadspec`.
|
||||
|
||||
### Wiring security hooks
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/security"
|
||||
|
||||
securityList := security.NewSecurityList(mySecurityProvider)
|
||||
resolvemcp.RegisterSecurityHooks(handler, securityList)
|
||||
```
|
||||
|
||||
Call `RegisterSecurityHooks` **once**, after creating the handler and before registering models. It installs these controls automatically:
|
||||
|
||||
| Hook | Effect |
|
||||
|---|---|
|
||||
| `BeforeHandle` | Enforces per-entity operation rules (see below) |
|
||||
| `BeforeRead` | Loads RLS/CLS rules, then injects a user-scoped WHERE clause |
|
||||
| `AfterRead` | Masks/hides columns per column-security rules; writes audit log |
|
||||
| `BeforeUpdate` | Blocks update if `CanUpdate` is false |
|
||||
| `BeforeDelete` | Blocks delete if `CanDelete` is false |
|
||||
|
||||
### Per-entity operation rules
|
||||
|
||||
Use `RegisterModelWithRules` instead of `RegisterModel` to set access rules at registration time:
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/modelregistry"
|
||||
|
||||
// Read-only entity
|
||||
handler.RegisterModelWithRules("public", "audit_logs", &AuditLog{}, modelregistry.ModelRules{
|
||||
CanRead: true,
|
||||
CanCreate: false,
|
||||
CanUpdate: false,
|
||||
CanDelete: false,
|
||||
})
|
||||
|
||||
// Public read, authenticated write
|
||||
handler.RegisterModelWithRules("public", "products", &Product{}, modelregistry.ModelRules{
|
||||
CanPublicRead: true,
|
||||
CanRead: true,
|
||||
CanCreate: true,
|
||||
CanUpdate: true,
|
||||
CanDelete: false,
|
||||
})
|
||||
```
|
||||
|
||||
To update rules for an already-registered model:
|
||||
|
||||
```go
|
||||
handler.SetModelRules("public", "users", modelregistry.ModelRules{
|
||||
CanRead: true,
|
||||
CanCreate: true,
|
||||
CanUpdate: true,
|
||||
CanDelete: false,
|
||||
})
|
||||
```
|
||||
|
||||
`RegisterModel` (no rules) registers with all-allowed defaults (`CanRead/Create/Update/Delete = true`).
|
||||
|
||||
### ModelRules fields
|
||||
|
||||
| Field | Default | Description |
|
||||
|---|---|---|
|
||||
| `CanPublicRead` | `false` | Allow unauthenticated reads |
|
||||
| `CanPublicCreate` | `false` | Allow unauthenticated creates |
|
||||
| `CanPublicUpdate` | `false` | Allow unauthenticated updates |
|
||||
| `CanPublicDelete` | `false` | Allow unauthenticated deletes |
|
||||
| `CanRead` | `true` | Allow authenticated reads |
|
||||
| `CanCreate` | `true` | Allow authenticated creates |
|
||||
| `CanUpdate` | `true` | Allow authenticated updates |
|
||||
| `CanDelete` | `true` | Allow authenticated deletes |
|
||||
| `SecurityDisabled` | `false` | Skip all security checks for this model |
|
||||
|
||||
---
|
||||
|
||||
## MCP Tools
|
||||
|
||||
### Tool Naming
|
||||
|
||||
```
|
||||
{operation}_{schema}_{entity} // e.g. read_public_users
|
||||
{operation}_{entity} // e.g. read_users (when schema is empty)
|
||||
```
|
||||
|
||||
Operations: `read`, `create`, `update`, `delete`.
|
||||
|
||||
### Read Tool — `read_{schema}_{entity}`
|
||||
|
||||
Fetch one or many records.
|
||||
|
||||
| Argument | Type | Description |
|
||||
|---|---|---|
|
||||
| `id` | string | Primary key value. Omit to return multiple records. |
|
||||
| `limit` | number | Max records per page (recommended: 10–100). |
|
||||
| `offset` | number | Records to skip (offset-based pagination). |
|
||||
| `cursor_forward` | string | PK of the **last** record on the current page (next-page cursor). |
|
||||
| `cursor_backward` | string | PK of the **first** record on the current page (prev-page cursor). |
|
||||
| `columns` | array | Column names to include. Omit for all columns. |
|
||||
| `omit_columns` | array | Column names to exclude. |
|
||||
| `filters` | array | Filter objects (see [Filtering](#filtering)). |
|
||||
| `sort` | array | Sort objects (see [Sorting](#sorting)). |
|
||||
| `preloads` | array | Relation preload objects (see [Preloading](#preloading)). |
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": [...],
|
||||
"metadata": {
|
||||
"total": 100,
|
||||
"filtered": 100,
|
||||
"count": 10,
|
||||
"limit": 10,
|
||||
"offset": 0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Create Tool — `create_{schema}_{entity}`
|
||||
|
||||
Insert one or more records.
|
||||
|
||||
| Argument | Type | Description |
|
||||
|---|---|---|
|
||||
| `data` | object \| array | Single object or array of objects to insert. |
|
||||
|
||||
Array input runs inside a single transaction — all succeed or all fail.
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{ "success": true, "data": { ... } }
|
||||
```
|
||||
|
||||
### Update Tool — `update_{schema}_{entity}`
|
||||
|
||||
Partially update an existing record. Only non-null, non-empty fields in `data` are applied; existing values are preserved for omitted fields.
|
||||
|
||||
| Argument | Type | Description |
|
||||
|---|---|---|
|
||||
| `id` | string | Primary key of the record. Can also be included inside `data`. |
|
||||
| `data` | object (required) | Fields to update. |
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{ "success": true, "data": { ...merged record... } }
|
||||
```
|
||||
|
||||
### Delete Tool — `delete_{schema}_{entity}`
|
||||
|
||||
Delete a record by primary key. **Irreversible.**
|
||||
|
||||
| Argument | Type | Description |
|
||||
|---|---|---|
|
||||
| `id` | string (required) | Primary key of the record to delete. |
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{ "success": true, "data": { ...deleted record... } }
|
||||
```
|
||||
|
||||
### Annotation Tool — `resolvespec_annotate`
|
||||
|
||||
Store or retrieve freeform annotation records for any tool, model, or entity. Registered automatically on every handler.
|
||||
|
||||
| Argument | Type | Description |
|
||||
|---|---|---|
|
||||
| `tool_name` | string (required) | Key to annotate — an MCP tool name (e.g. `read_public_users`), a model name (e.g. `public.users`), or any other identifier. |
|
||||
| `annotations` | object | Annotation data to persist. Omit to retrieve existing annotations instead. |
|
||||
|
||||
**Set annotations** (calls `resolvespec_set_annotation(tool_name, annotations)`):
|
||||
```json
|
||||
{ "tool_name": "read_public_users", "annotations": { "description": "Returns active users", "owner": "platform-team" } }
|
||||
```
|
||||
**Response:**
|
||||
```json
|
||||
{ "success": true, "tool_name": "read_public_users", "action": "set" }
|
||||
```
|
||||
|
||||
**Get annotations** (calls `resolvespec_get_annotation(tool_name)`):
|
||||
```json
|
||||
{ "tool_name": "read_public_users" }
|
||||
```
|
||||
**Response:**
|
||||
```json
|
||||
{ "success": true, "tool_name": "read_public_users", "action": "get", "annotations": { ... } }
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Resource — `{schema}.{entity}`
|
||||
|
||||
Each model is also registered as an MCP resource with URI `schema.entity` (or just `entity` when schema is empty). Reading the resource returns up to 100 records as `application/json`.
|
||||
|
||||
---
|
||||
|
||||
## Filtering
|
||||
|
||||
Pass an array of filter objects to the `filters` argument:
|
||||
|
||||
```json
|
||||
[
|
||||
{ "column": "status", "operator": "=", "value": "active" },
|
||||
{ "column": "age", "operator": ">", "value": 18, "logic_operator": "AND" },
|
||||
{ "column": "role", "operator": "in", "value": ["admin", "editor"], "logic_operator": "OR" }
|
||||
]
|
||||
```
|
||||
|
||||
### Supported Operators
|
||||
|
||||
| Operator | Aliases | Description |
|
||||
|---|---|---|
|
||||
| `=` | `eq` | Equal |
|
||||
| `!=` | `neq`, `<>` | Not equal |
|
||||
| `>` | `gt` | Greater than |
|
||||
| `>=` | `gte` | Greater than or equal |
|
||||
| `<` | `lt` | Less than |
|
||||
| `<=` | `lte` | Less than or equal |
|
||||
| `like` | | SQL LIKE (case-sensitive) |
|
||||
| `ilike` | | SQL ILIKE (case-insensitive) |
|
||||
| `in` | | Value in list |
|
||||
| `is_null` | | Column IS NULL |
|
||||
| `is_not_null` | | Column IS NOT NULL |
|
||||
|
||||
### Logic Operators
|
||||
|
||||
- `"logic_operator": "AND"` (default) — filter is AND-chained with the previous condition.
|
||||
- `"logic_operator": "OR"` — filter is OR-grouped with the previous condition.
|
||||
|
||||
Consecutive OR filters are grouped into a single `(cond1 OR cond2 OR ...)` clause.
|
||||
|
||||
---
|
||||
|
||||
## Sorting
|
||||
|
||||
```json
|
||||
[
|
||||
{ "column": "created_at", "direction": "desc" },
|
||||
{ "column": "name", "direction": "asc" }
|
||||
]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Pagination
|
||||
|
||||
### Offset-Based
|
||||
|
||||
```json
|
||||
{ "limit": 20, "offset": 40 }
|
||||
```
|
||||
|
||||
### Cursor-Based
|
||||
|
||||
Cursor pagination uses a SQL `EXISTS` subquery for stable, efficient paging. Always pair with a `sort` argument.
|
||||
|
||||
```json
|
||||
// Next page: pass the PK of the last record on the current page
|
||||
{ "cursor_forward": "42", "limit": 20, "sort": [{"column": "id", "direction": "asc"}] }
|
||||
|
||||
// Previous page: pass the PK of the first record on the current page
|
||||
{ "cursor_backward": "23", "limit": 20, "sort": [{"column": "id", "direction": "asc"}] }
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Preloading Relations
|
||||
|
||||
```json
|
||||
[
|
||||
{ "relation": "Profile" },
|
||||
{ "relation": "Orders" }
|
||||
]
|
||||
```
|
||||
|
||||
Available relations are listed in each tool's description. Only relations defined on the model struct are valid.
|
||||
|
||||
---
|
||||
|
||||
## Hook System
|
||||
|
||||
Hooks let you intercept and modify CRUD operations at well-defined lifecycle points.
|
||||
|
||||
### Hook Types
|
||||
|
||||
| Constant | Fires |
|
||||
|---|---|
|
||||
| `BeforeHandle` | After model resolution, before operation dispatch (all CRUD) |
|
||||
| `BeforeRead` / `AfterRead` | Around read queries |
|
||||
| `BeforeCreate` / `AfterCreate` | Around insert |
|
||||
| `BeforeUpdate` / `AfterUpdate` | Around update |
|
||||
| `BeforeDelete` / `AfterDelete` | Around delete |
|
||||
|
||||
### Registering Hooks
|
||||
|
||||
```go
|
||||
handler.Hooks().Register(resolvemcp.BeforeCreate, func(ctx *resolvemcp.HookContext) error {
|
||||
// Inject a timestamp before insert
|
||||
if data, ok := ctx.Data.(map[string]interface{}); ok {
|
||||
data["created_at"] = time.Now()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// Register the same hook for multiple events
|
||||
handler.Hooks().RegisterMultiple(
|
||||
[]resolvemcp.HookType{resolvemcp.BeforeCreate, resolvemcp.BeforeUpdate},
|
||||
auditHook,
|
||||
)
|
||||
```
|
||||
|
||||
### HookContext Fields
|
||||
|
||||
| Field | Type | Description |
|
||||
|---|---|---|
|
||||
| `Context` | `context.Context` | Request context |
|
||||
| `Handler` | `*Handler` | The resolvemcp handler |
|
||||
| `Schema` | `string` | Database schema name |
|
||||
| `Entity` | `string` | Entity/table name |
|
||||
| `Model` | `interface{}` | Registered model instance |
|
||||
| `Options` | `common.RequestOptions` | Parsed request options (read operations) |
|
||||
| `Operation` | `string` | `"read"`, `"create"`, `"update"`, or `"delete"` |
|
||||
| `ID` | `string` | Primary key from request (read/update/delete) |
|
||||
| `Data` | `interface{}` | Input data (create/update — modifiable) |
|
||||
| `Result` | `interface{}` | Output data (set by After hooks) |
|
||||
| `Error` | `error` | Operation error, if any |
|
||||
| `Query` | `common.SelectQuery` | Live query object (available in `BeforeRead`) |
|
||||
| `Tx` | `common.Database` | Database/transaction handle |
|
||||
| `Abort` | `bool` | Set to `true` to abort the operation |
|
||||
| `AbortMessage` | `string` | Error message returned when aborting |
|
||||
| `AbortCode` | `int` | Optional status code for the abort |
|
||||
|
||||
### Aborting an Operation
|
||||
|
||||
```go
|
||||
handler.Hooks().Register(resolvemcp.BeforeDelete, func(ctx *resolvemcp.HookContext) error {
|
||||
ctx.Abort = true
|
||||
ctx.AbortMessage = "deletion is disabled"
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
### Managing Hooks
|
||||
|
||||
```go
|
||||
registry := handler.Hooks()
|
||||
registry.HasHooks(resolvemcp.BeforeCreate) // bool
|
||||
registry.Clear(resolvemcp.BeforeCreate) // remove hooks for one type
|
||||
registry.ClearAll() // remove all hooks
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Context Helpers
|
||||
|
||||
Request metadata is threaded through `context.Context` during handler execution. Hooks and custom tools can read it:
|
||||
|
||||
```go
|
||||
schema := resolvemcp.GetSchema(ctx)
|
||||
entity := resolvemcp.GetEntity(ctx)
|
||||
tableName := resolvemcp.GetTableName(ctx)
|
||||
model := resolvemcp.GetModel(ctx)
|
||||
modelPtr := resolvemcp.GetModelPtr(ctx)
|
||||
```
|
||||
|
||||
You can also set values manually (e.g. in middleware):
|
||||
|
||||
```go
|
||||
ctx = resolvemcp.WithSchema(ctx, "tenant_a")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Adding Custom MCP Tools
|
||||
|
||||
Access the underlying `*server.MCPServer` to register additional tools:
|
||||
|
||||
```go
|
||||
mcpServer := handler.MCPServer()
|
||||
mcpServer.AddTool(myTool, myHandler)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Table Name Resolution
|
||||
|
||||
The handler resolves table names in priority order:
|
||||
|
||||
1. `TableNameProvider` interface — `TableName() string` (can return `"schema.table"`)
|
||||
2. `SchemaProvider` interface — `SchemaName() string` (combined with entity name)
|
||||
3. Fallback: `schema.entity` (or `schema_entity` for SQLite)
|
||||
107
pkg/resolvemcp/annotation.go
Normal file
107
pkg/resolvemcp/annotation.go
Normal file
@@ -0,0 +1,107 @@
|
||||
package resolvemcp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/mark3labs/mcp-go/mcp"
|
||||
)
|
||||
|
||||
const annotationToolName = "resolvespec_annotate"
|
||||
|
||||
// registerAnnotationTool adds the resolvespec_annotate tool to the MCP server.
|
||||
// The tool lets models/entities store and retrieve freeform annotation records
|
||||
// using the resolvespec_set_annotation / resolvespec_get_annotation database procedures.
|
||||
func registerAnnotationTool(h *Handler) {
|
||||
tool := mcp.NewTool(annotationToolName,
|
||||
mcp.WithDescription(
|
||||
"Store or retrieve annotations for any MCP tool, model, or entity.\n\n"+
|
||||
"To set annotations: provide both 'tool_name' and 'annotations'. "+
|
||||
"Calls resolvespec_set_annotation(tool_name, annotations) to persist the data.\n\n"+
|
||||
"To get annotations: provide only 'tool_name'. "+
|
||||
"Calls resolvespec_get_annotation(tool_name) and returns the stored annotations.\n\n"+
|
||||
"'tool_name' may be any identifier: an MCP tool name (e.g. 'read_public_users'), "+
|
||||
"a model/entity name (e.g. 'public.users'), or any other key.",
|
||||
),
|
||||
mcp.WithString("tool_name",
|
||||
mcp.Description("Name of the tool, model, or entity to annotate (e.g. 'read_public_users', 'public.users')."),
|
||||
mcp.Required(),
|
||||
),
|
||||
mcp.WithObject("annotations",
|
||||
mcp.Description("Annotation data to store. Omit to retrieve existing annotations instead of setting them."),
|
||||
),
|
||||
)
|
||||
|
||||
h.mcpServer.AddTool(tool, func(ctx context.Context, req mcp.CallToolRequest) (*mcp.CallToolResult, error) {
|
||||
args := req.GetArguments()
|
||||
|
||||
toolName, ok := args["tool_name"].(string)
|
||||
if !ok || toolName == "" {
|
||||
return mcp.NewToolResultError("missing required argument: tool_name"), nil
|
||||
}
|
||||
|
||||
annotations, hasAnnotations := args["annotations"]
|
||||
|
||||
if hasAnnotations && annotations != nil {
|
||||
return executeSetAnnotation(ctx, h, toolName, annotations)
|
||||
}
|
||||
return executeGetAnnotation(ctx, h, toolName)
|
||||
})
|
||||
}
|
||||
|
||||
func executeSetAnnotation(ctx context.Context, h *Handler, toolName string, annotations interface{}) (*mcp.CallToolResult, error) {
|
||||
jsonBytes, err := json.Marshal(annotations)
|
||||
if err != nil {
|
||||
return mcp.NewToolResultError(fmt.Sprintf("failed to marshal annotations: %v", err)), nil
|
||||
}
|
||||
|
||||
_, err = h.db.Exec(ctx, "SELECT resolvespec_set_annotation($1, $2)", toolName, string(jsonBytes))
|
||||
if err != nil {
|
||||
return mcp.NewToolResultError(fmt.Sprintf("failed to set annotation: %v", err)), nil
|
||||
}
|
||||
|
||||
return marshalResult(map[string]interface{}{
|
||||
"success": true,
|
||||
"tool_name": toolName,
|
||||
"action": "set",
|
||||
})
|
||||
}
|
||||
|
||||
func executeGetAnnotation(ctx context.Context, h *Handler, toolName string) (*mcp.CallToolResult, error) {
|
||||
var rows []map[string]interface{}
|
||||
err := h.db.Query(ctx, &rows, "SELECT resolvespec_get_annotation($1)", toolName)
|
||||
if err != nil {
|
||||
return mcp.NewToolResultError(fmt.Sprintf("failed to get annotation: %v", err)), nil
|
||||
}
|
||||
|
||||
var annotations interface{}
|
||||
if len(rows) > 0 {
|
||||
// The procedure returns a single value; extract the first column of the first row.
|
||||
for _, v := range rows[0] {
|
||||
annotations = v
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If the value is a []byte or string containing JSON, decode it so it round-trips cleanly.
|
||||
switch v := annotations.(type) {
|
||||
case []byte:
|
||||
var decoded interface{}
|
||||
if json.Unmarshal(v, &decoded) == nil {
|
||||
annotations = decoded
|
||||
}
|
||||
case string:
|
||||
var decoded interface{}
|
||||
if json.Unmarshal([]byte(v), &decoded) == nil {
|
||||
annotations = decoded
|
||||
}
|
||||
}
|
||||
|
||||
return marshalResult(map[string]interface{}{
|
||||
"success": true,
|
||||
"tool_name": toolName,
|
||||
"action": "get",
|
||||
"annotations": annotations,
|
||||
})
|
||||
}
|
||||
71
pkg/resolvemcp/context.go
Normal file
71
pkg/resolvemcp/context.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package resolvemcp
|
||||
|
||||
import "context"
|
||||
|
||||
type contextKey string
|
||||
|
||||
const (
|
||||
contextKeySchema contextKey = "schema"
|
||||
contextKeyEntity contextKey = "entity"
|
||||
contextKeyTableName contextKey = "tableName"
|
||||
contextKeyModel contextKey = "model"
|
||||
contextKeyModelPtr contextKey = "modelPtr"
|
||||
)
|
||||
|
||||
func WithSchema(ctx context.Context, schema string) context.Context {
|
||||
return context.WithValue(ctx, contextKeySchema, schema)
|
||||
}
|
||||
|
||||
func GetSchema(ctx context.Context) string {
|
||||
if v := ctx.Value(contextKeySchema); v != nil {
|
||||
return v.(string)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func WithEntity(ctx context.Context, entity string) context.Context {
|
||||
return context.WithValue(ctx, contextKeyEntity, entity)
|
||||
}
|
||||
|
||||
func GetEntity(ctx context.Context) string {
|
||||
if v := ctx.Value(contextKeyEntity); v != nil {
|
||||
return v.(string)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func WithTableName(ctx context.Context, tableName string) context.Context {
|
||||
return context.WithValue(ctx, contextKeyTableName, tableName)
|
||||
}
|
||||
|
||||
func GetTableName(ctx context.Context) string {
|
||||
if v := ctx.Value(contextKeyTableName); v != nil {
|
||||
return v.(string)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func WithModel(ctx context.Context, model interface{}) context.Context {
|
||||
return context.WithValue(ctx, contextKeyModel, model)
|
||||
}
|
||||
|
||||
func GetModel(ctx context.Context) interface{} {
|
||||
return ctx.Value(contextKeyModel)
|
||||
}
|
||||
|
||||
func WithModelPtr(ctx context.Context, modelPtr interface{}) context.Context {
|
||||
return context.WithValue(ctx, contextKeyModelPtr, modelPtr)
|
||||
}
|
||||
|
||||
func GetModelPtr(ctx context.Context) interface{} {
|
||||
return ctx.Value(contextKeyModelPtr)
|
||||
}
|
||||
|
||||
func withRequestData(ctx context.Context, schema, entity, tableName string, model, modelPtr interface{}) context.Context {
|
||||
ctx = WithSchema(ctx, schema)
|
||||
ctx = WithEntity(ctx, entity)
|
||||
ctx = WithTableName(ctx, tableName)
|
||||
ctx = WithModel(ctx, model)
|
||||
ctx = WithModelPtr(ctx, modelPtr)
|
||||
return ctx
|
||||
}
|
||||
161
pkg/resolvemcp/cursor.go
Normal file
161
pkg/resolvemcp/cursor.go
Normal file
@@ -0,0 +1,161 @@
|
||||
package resolvemcp
|
||||
|
||||
// Cursor-based pagination adapted from pkg/resolvespec/cursor.go.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
)
|
||||
|
||||
type cursorDirection int
|
||||
|
||||
const (
|
||||
cursorForward cursorDirection = 1
|
||||
cursorBackward cursorDirection = -1
|
||||
)
|
||||
|
||||
// getCursorFilter generates a SQL EXISTS subquery for cursor-based pagination.
|
||||
// expandJoins is an optional map[alias]string of JOIN clauses for join-column sort support.
|
||||
func getCursorFilter(
|
||||
tableName string,
|
||||
pkName string,
|
||||
modelColumns []string,
|
||||
options common.RequestOptions,
|
||||
expandJoins map[string]string,
|
||||
) (string, error) {
|
||||
fullTableName := tableName
|
||||
if strings.Contains(tableName, ".") {
|
||||
tableName = strings.SplitN(tableName, ".", 2)[1]
|
||||
}
|
||||
|
||||
cursorID, direction := getActiveCursor(options)
|
||||
if cursorID == "" {
|
||||
return "", fmt.Errorf("no cursor provided for table %s", tableName)
|
||||
}
|
||||
|
||||
sortItems := options.Sort
|
||||
if len(sortItems) == 0 {
|
||||
return "", fmt.Errorf("no sort columns defined")
|
||||
}
|
||||
|
||||
var whereClauses []string
|
||||
joinSQL := ""
|
||||
reverse := direction < 0
|
||||
|
||||
for _, s := range sortItems {
|
||||
col := strings.Trim(strings.TrimSpace(s.Column), "()")
|
||||
if col == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.Split(col, ".")
|
||||
field := strings.TrimSpace(parts[len(parts)-1])
|
||||
prefix := strings.Join(parts[:len(parts)-1], ".")
|
||||
|
||||
desc := strings.EqualFold(s.Direction, "desc")
|
||||
if reverse {
|
||||
desc = !desc
|
||||
}
|
||||
|
||||
cursorCol, targetCol, isJoin, err := resolveCursorColumn(field, prefix, tableName, modelColumns)
|
||||
if err != nil {
|
||||
logger.Warn("Skipping invalid sort column %q: %v", col, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if isJoin {
|
||||
if expandJoins != nil {
|
||||
if joinClause, ok := expandJoins[prefix]; ok {
|
||||
jSQL, cRef := rewriteCursorJoin(joinClause, tableName, prefix)
|
||||
joinSQL = jSQL
|
||||
cursorCol = cRef + "." + field
|
||||
targetCol = prefix + "." + field
|
||||
}
|
||||
}
|
||||
if cursorCol == "" {
|
||||
logger.Warn("Skipping cursor sort column %q: join alias %q not in expandJoins", col, prefix)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
op := "<"
|
||||
if desc {
|
||||
op = ">"
|
||||
}
|
||||
whereClauses = append(whereClauses, fmt.Sprintf("%s %s %s", cursorCol, op, targetCol))
|
||||
}
|
||||
|
||||
if len(whereClauses) == 0 {
|
||||
return "", fmt.Errorf("no valid sort columns after filtering")
|
||||
}
|
||||
|
||||
orSQL := buildCursorPriorityChain(whereClauses)
|
||||
|
||||
query := fmt.Sprintf(`EXISTS (
|
||||
SELECT 1
|
||||
FROM %s cursor_select
|
||||
%s
|
||||
WHERE cursor_select.%s = %s
|
||||
AND (%s)
|
||||
)`,
|
||||
fullTableName,
|
||||
joinSQL,
|
||||
pkName,
|
||||
cursorID,
|
||||
orSQL,
|
||||
)
|
||||
|
||||
return query, nil
|
||||
}
|
||||
|
||||
func getActiveCursor(options common.RequestOptions) (id string, direction cursorDirection) {
|
||||
if options.CursorForward != "" {
|
||||
return options.CursorForward, cursorForward
|
||||
}
|
||||
if options.CursorBackward != "" {
|
||||
return options.CursorBackward, cursorBackward
|
||||
}
|
||||
return "", 0
|
||||
}
|
||||
|
||||
func resolveCursorColumn(field, prefix, tableName string, modelColumns []string) (cursorCol, targetCol string, isJoin bool, err error) {
|
||||
if strings.Contains(field, "->") {
|
||||
return "cursor_select." + field, tableName + "." + field, false, nil
|
||||
}
|
||||
|
||||
if modelColumns != nil {
|
||||
for _, col := range modelColumns {
|
||||
if strings.EqualFold(col, field) {
|
||||
return "cursor_select." + field, tableName + "." + field, false, nil
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return "cursor_select." + field, tableName + "." + field, false, nil
|
||||
}
|
||||
|
||||
if prefix != "" && prefix != tableName {
|
||||
return "", "", true, nil
|
||||
}
|
||||
|
||||
return "", "", false, fmt.Errorf("invalid column: %s", field)
|
||||
}
|
||||
|
||||
func rewriteCursorJoin(joinClause, mainTable, alias string) (joinSQL, cursorAlias string) {
|
||||
joinSQL = strings.ReplaceAll(joinClause, mainTable+".", "cursor_select.")
|
||||
cursorAlias = "cursor_select_" + alias
|
||||
joinSQL = strings.ReplaceAll(joinSQL, " "+alias+" ", " "+cursorAlias+" ")
|
||||
joinSQL = strings.ReplaceAll(joinSQL, " "+alias+".", " "+cursorAlias+".")
|
||||
return joinSQL, cursorAlias
|
||||
}
|
||||
|
||||
func buildCursorPriorityChain(clauses []string) string {
|
||||
var or []string
|
||||
for i := 0; i < len(clauses); i++ {
|
||||
and := strings.Join(clauses[:i+1], "\n AND ")
|
||||
or = append(or, "("+and+")")
|
||||
}
|
||||
return strings.Join(or, "\n OR ")
|
||||
}
|
||||
736
pkg/resolvemcp/handler.go
Normal file
736
pkg/resolvemcp/handler.go
Normal file
@@ -0,0 +1,736 @@
|
||||
package resolvemcp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/mark3labs/mcp-go/server"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/modelregistry"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/reflection"
|
||||
)
|
||||
|
||||
// Handler exposes registered database models as MCP tools and resources.
|
||||
type Handler struct {
|
||||
db common.Database
|
||||
registry common.ModelRegistry
|
||||
hooks *HookRegistry
|
||||
mcpServer *server.MCPServer
|
||||
config Config
|
||||
name string
|
||||
version string
|
||||
}
|
||||
|
||||
// NewHandler creates a Handler with the given database, model registry, and config.
|
||||
func NewHandler(db common.Database, registry common.ModelRegistry, cfg Config) *Handler {
|
||||
h := &Handler{
|
||||
db: db,
|
||||
registry: registry,
|
||||
hooks: NewHookRegistry(),
|
||||
mcpServer: server.NewMCPServer("resolvemcp", "1.0.0"),
|
||||
config: cfg,
|
||||
name: "resolvemcp",
|
||||
version: "1.0.0",
|
||||
}
|
||||
registerAnnotationTool(h)
|
||||
return h
|
||||
}
|
||||
|
||||
// Hooks returns the hook registry.
|
||||
func (h *Handler) Hooks() *HookRegistry {
|
||||
return h.hooks
|
||||
}
|
||||
|
||||
// GetDatabase returns the underlying database.
|
||||
func (h *Handler) GetDatabase() common.Database {
|
||||
return h.db
|
||||
}
|
||||
|
||||
// MCPServer returns the underlying MCP server, e.g. to add custom tools.
|
||||
func (h *Handler) MCPServer() *server.MCPServer {
|
||||
return h.mcpServer
|
||||
}
|
||||
|
||||
// SSEServer returns an http.Handler that serves MCP over SSE.
|
||||
// Config.BasePath must be set. Config.BaseURL is used when set; if empty it is
|
||||
// detected automatically from each incoming request.
|
||||
func (h *Handler) SSEServer() http.Handler {
|
||||
if h.config.BaseURL != "" {
|
||||
return h.newSSEServer(h.config.BaseURL, h.config.BasePath)
|
||||
}
|
||||
return &dynamicSSEHandler{h: h}
|
||||
}
|
||||
|
||||
// newSSEServer creates a concrete *server.SSEServer for known baseURL and basePath values.
|
||||
func (h *Handler) newSSEServer(baseURL, basePath string) *server.SSEServer {
|
||||
return server.NewSSEServer(
|
||||
h.mcpServer,
|
||||
server.WithBaseURL(baseURL),
|
||||
server.WithStaticBasePath(basePath),
|
||||
)
|
||||
}
|
||||
|
||||
// dynamicSSEHandler detects BaseURL from each request and delegates to a cached
|
||||
// *server.SSEServer per detected baseURL. Used when Config.BaseURL is empty.
|
||||
type dynamicSSEHandler struct {
|
||||
h *Handler
|
||||
mu sync.Mutex
|
||||
pool map[string]*server.SSEServer
|
||||
}
|
||||
|
||||
func (d *dynamicSSEHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
baseURL := requestBaseURL(r)
|
||||
|
||||
d.mu.Lock()
|
||||
if d.pool == nil {
|
||||
d.pool = make(map[string]*server.SSEServer)
|
||||
}
|
||||
s, ok := d.pool[baseURL]
|
||||
if !ok {
|
||||
s = d.h.newSSEServer(baseURL, d.h.config.BasePath)
|
||||
d.pool[baseURL] = s
|
||||
}
|
||||
d.mu.Unlock()
|
||||
|
||||
s.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
// requestBaseURL builds the base URL from an incoming request.
|
||||
// It honours the X-Forwarded-Proto header for deployments behind a proxy.
|
||||
func requestBaseURL(r *http.Request) string {
|
||||
scheme := "http"
|
||||
if r.TLS != nil {
|
||||
scheme = "https"
|
||||
}
|
||||
if proto := r.Header.Get("X-Forwarded-Proto"); proto != "" {
|
||||
scheme = proto
|
||||
}
|
||||
return scheme + "://" + r.Host
|
||||
}
|
||||
|
||||
// RegisterModel registers a model and immediately exposes it as MCP tools and a resource.
|
||||
func (h *Handler) RegisterModel(schema, entity string, model interface{}) error {
|
||||
fullName := buildModelName(schema, entity)
|
||||
if err := h.registry.RegisterModel(fullName, model); err != nil {
|
||||
return err
|
||||
}
|
||||
registerModelTools(h, schema, entity, model)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterModelWithRules registers a model and sets per-entity operation rules
|
||||
// (CanRead, CanCreate, CanUpdate, CanDelete, CanPublic*, SecurityDisabled).
|
||||
// Requires RegisterSecurityHooks to have been called for the rules to be enforced.
|
||||
func (h *Handler) RegisterModelWithRules(schema, entity string, model interface{}, rules modelregistry.ModelRules) error {
|
||||
reg, ok := h.registry.(*modelregistry.DefaultModelRegistry)
|
||||
if !ok {
|
||||
return fmt.Errorf("resolvemcp: registry does not support model rules (use NewHandlerWithGORM/Bun/DB)")
|
||||
}
|
||||
fullName := buildModelName(schema, entity)
|
||||
if err := reg.RegisterModelWithRules(fullName, model, rules); err != nil {
|
||||
return err
|
||||
}
|
||||
registerModelTools(h, schema, entity, model)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetModelRules updates the operation rules for an already-registered model.
|
||||
// Requires RegisterSecurityHooks to have been called for the rules to be enforced.
|
||||
func (h *Handler) SetModelRules(schema, entity string, rules modelregistry.ModelRules) error {
|
||||
reg, ok := h.registry.(*modelregistry.DefaultModelRegistry)
|
||||
if !ok {
|
||||
return fmt.Errorf("resolvemcp: registry does not support model rules (use NewHandlerWithGORM/Bun/DB)")
|
||||
}
|
||||
return reg.SetModelRules(buildModelName(schema, entity), rules)
|
||||
}
|
||||
|
||||
// buildModelName builds the registry key for a model (same format as resolvespec).
|
||||
func buildModelName(schema, entity string) string {
|
||||
if schema == "" {
|
||||
return entity
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", schema, entity)
|
||||
}
|
||||
|
||||
// getTableName returns the fully qualified table name for a model.
|
||||
func (h *Handler) getTableName(schema, entity string, model interface{}) string {
|
||||
schemaName, tableName := h.getSchemaAndTable(schema, entity, model)
|
||||
if schemaName != "" {
|
||||
if h.db.DriverName() == "sqlite" {
|
||||
return fmt.Sprintf("%s_%s", schemaName, tableName)
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", schemaName, tableName)
|
||||
}
|
||||
return tableName
|
||||
}
|
||||
|
||||
func (h *Handler) getSchemaAndTable(defaultSchema, entity string, model interface{}) (schema, table string) {
|
||||
if tableProvider, ok := model.(common.TableNameProvider); ok {
|
||||
tableName := tableProvider.TableName()
|
||||
if idx := strings.LastIndex(tableName, "."); idx != -1 {
|
||||
return tableName[:idx], tableName[idx+1:]
|
||||
}
|
||||
if schemaProvider, ok := model.(common.SchemaProvider); ok {
|
||||
return schemaProvider.SchemaName(), tableName
|
||||
}
|
||||
return defaultSchema, tableName
|
||||
}
|
||||
if schemaProvider, ok := model.(common.SchemaProvider); ok {
|
||||
return schemaProvider.SchemaName(), entity
|
||||
}
|
||||
return defaultSchema, entity
|
||||
}
|
||||
|
||||
// executeRead reads records from the database and returns raw data + metadata.
|
||||
func (h *Handler) executeRead(ctx context.Context, schema, entity, id string, options common.RequestOptions) (interface{}, *common.Metadata, error) {
|
||||
model, err := h.registry.GetModelByEntity(schema, entity)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("model not found: %w", err)
|
||||
}
|
||||
|
||||
unwrapped, err := common.ValidateAndUnwrapModel(model)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("invalid model: %w", err)
|
||||
}
|
||||
|
||||
model = unwrapped.Model
|
||||
modelType := unwrapped.ModelType
|
||||
tableName := h.getTableName(schema, entity, model)
|
||||
ctx = withRequestData(ctx, schema, entity, tableName, model, unwrapped.ModelPtr)
|
||||
|
||||
validator := common.NewColumnValidator(model)
|
||||
options = validator.FilterRequestOptions(options)
|
||||
|
||||
// BeforeHandle hook
|
||||
hookCtx := &HookContext{
|
||||
Context: ctx,
|
||||
Handler: h,
|
||||
Schema: schema,
|
||||
Entity: entity,
|
||||
Model: model,
|
||||
Operation: "read",
|
||||
Options: options,
|
||||
ID: id,
|
||||
Tx: h.db,
|
||||
}
|
||||
if err := h.hooks.Execute(BeforeHandle, hookCtx); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
sliceType := reflect.SliceOf(reflect.PointerTo(modelType))
|
||||
modelPtr := reflect.New(sliceType).Interface()
|
||||
|
||||
query := h.db.NewSelect().Model(modelPtr)
|
||||
|
||||
tempInstance := reflect.New(modelType).Interface()
|
||||
if provider, ok := tempInstance.(common.TableNameProvider); !ok || provider.TableName() == "" {
|
||||
query = query.Table(tableName)
|
||||
}
|
||||
|
||||
// Column selection
|
||||
if len(options.Columns) == 0 && len(options.ComputedColumns) > 0 {
|
||||
options.Columns = reflection.GetSQLModelColumns(model)
|
||||
}
|
||||
for _, col := range options.Columns {
|
||||
query = query.Column(reflection.ExtractSourceColumn(col))
|
||||
}
|
||||
for _, cu := range options.ComputedColumns {
|
||||
query = query.ColumnExpr(fmt.Sprintf("(%s) AS %s", cu.Expression, cu.Name))
|
||||
}
|
||||
|
||||
// Preloads
|
||||
if len(options.Preload) > 0 {
|
||||
var err error
|
||||
query, err = h.applyPreloads(model, query, options.Preload)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to apply preloads: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Filters
|
||||
query = h.applyFilters(query, options.Filters)
|
||||
|
||||
// Custom operators
|
||||
for _, customOp := range options.CustomOperators {
|
||||
query = query.Where(customOp.SQL)
|
||||
}
|
||||
|
||||
// Sorting
|
||||
for _, sort := range options.Sort {
|
||||
direction := "ASC"
|
||||
if strings.EqualFold(sort.Direction, "desc") {
|
||||
direction = "DESC"
|
||||
}
|
||||
query = query.Order(fmt.Sprintf("%s %s", sort.Column, direction))
|
||||
}
|
||||
|
||||
// Cursor pagination
|
||||
if options.CursorForward != "" || options.CursorBackward != "" {
|
||||
pkName := reflection.GetPrimaryKeyName(model)
|
||||
modelColumns := reflection.GetModelColumns(model)
|
||||
|
||||
if len(options.Sort) == 0 {
|
||||
options.Sort = []common.SortOption{{Column: pkName, Direction: "ASC"}}
|
||||
}
|
||||
|
||||
// expandJoins is empty for resolvemcp — no custom SQL join support yet
|
||||
cursorFilter, err := getCursorFilter(tableName, pkName, modelColumns, options, nil)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("cursor error: %w", err)
|
||||
}
|
||||
|
||||
if cursorFilter != "" {
|
||||
sanitized := common.SanitizeWhereClause(cursorFilter, reflection.ExtractTableNameOnly(tableName), &options)
|
||||
sanitized = common.EnsureOuterParentheses(sanitized)
|
||||
if sanitized != "" {
|
||||
query = query.Where(sanitized)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Count
|
||||
total, err := query.Count(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error counting records: %w", err)
|
||||
}
|
||||
|
||||
// Pagination
|
||||
if options.Limit != nil && *options.Limit > 0 {
|
||||
query = query.Limit(*options.Limit)
|
||||
}
|
||||
if options.Offset != nil && *options.Offset > 0 {
|
||||
query = query.Offset(*options.Offset)
|
||||
}
|
||||
|
||||
// BeforeRead hook
|
||||
hookCtx.Query = query
|
||||
if err := h.hooks.Execute(BeforeRead, hookCtx); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var data interface{}
|
||||
if id != "" {
|
||||
singleResult := reflect.New(modelType).Interface()
|
||||
pkName := reflection.GetPrimaryKeyName(singleResult)
|
||||
query = query.Where(fmt.Sprintf("%s = ?", common.QuoteIdent(pkName)), id)
|
||||
if err := query.Scan(ctx, singleResult); err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil, fmt.Errorf("record not found")
|
||||
}
|
||||
return nil, nil, fmt.Errorf("query error: %w", err)
|
||||
}
|
||||
data = singleResult
|
||||
} else {
|
||||
if err := query.Scan(ctx, modelPtr); err != nil {
|
||||
return nil, nil, fmt.Errorf("query error: %w", err)
|
||||
}
|
||||
data = reflect.ValueOf(modelPtr).Elem().Interface()
|
||||
}
|
||||
|
||||
limit := 0
|
||||
offset := 0
|
||||
if options.Limit != nil {
|
||||
limit = *options.Limit
|
||||
}
|
||||
if options.Offset != nil {
|
||||
offset = *options.Offset
|
||||
}
|
||||
|
||||
// Count is the number of records in this page, not the total.
|
||||
var pageCount int64
|
||||
if id != "" {
|
||||
pageCount = 1
|
||||
} else {
|
||||
pageCount = int64(reflect.ValueOf(data).Len())
|
||||
}
|
||||
|
||||
metadata := &common.Metadata{
|
||||
Total: int64(total),
|
||||
Filtered: int64(total),
|
||||
Count: pageCount,
|
||||
Limit: limit,
|
||||
Offset: offset,
|
||||
}
|
||||
|
||||
// AfterRead hook
|
||||
hookCtx.Result = data
|
||||
if err := h.hooks.Execute(AfterRead, hookCtx); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return data, metadata, nil
|
||||
}
|
||||
|
||||
// executeCreate inserts one or more records.
|
||||
func (h *Handler) executeCreate(ctx context.Context, schema, entity string, data interface{}) (interface{}, error) {
|
||||
model, err := h.registry.GetModelByEntity(schema, entity)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("model not found: %w", err)
|
||||
}
|
||||
|
||||
result, err := common.ValidateAndUnwrapModel(model)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid model: %w", err)
|
||||
}
|
||||
|
||||
model = result.Model
|
||||
tableName := h.getTableName(schema, entity, model)
|
||||
ctx = withRequestData(ctx, schema, entity, tableName, model, result.ModelPtr)
|
||||
|
||||
hookCtx := &HookContext{
|
||||
Context: ctx,
|
||||
Handler: h,
|
||||
Schema: schema,
|
||||
Entity: entity,
|
||||
Model: model,
|
||||
Operation: "create",
|
||||
Data: data,
|
||||
Tx: h.db,
|
||||
}
|
||||
if err := h.hooks.Execute(BeforeHandle, hookCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := h.hooks.Execute(BeforeCreate, hookCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Use potentially modified data
|
||||
data = hookCtx.Data
|
||||
|
||||
switch v := data.(type) {
|
||||
case map[string]interface{}:
|
||||
query := h.db.NewInsert().Table(tableName)
|
||||
for key, value := range v {
|
||||
query = query.Value(key, value)
|
||||
}
|
||||
if _, err := query.Exec(ctx); err != nil {
|
||||
return nil, fmt.Errorf("create error: %w", err)
|
||||
}
|
||||
hookCtx.Result = v
|
||||
if err := h.hooks.Execute(AfterCreate, hookCtx); err != nil {
|
||||
return nil, fmt.Errorf("AfterCreate hook failed: %w", err)
|
||||
}
|
||||
return v, nil
|
||||
|
||||
case []interface{}:
|
||||
results := make([]interface{}, 0, len(v))
|
||||
err := h.db.RunInTransaction(ctx, func(tx common.Database) error {
|
||||
for _, item := range v {
|
||||
itemMap, ok := item.(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("each item must be an object")
|
||||
}
|
||||
q := tx.NewInsert().Table(tableName)
|
||||
for key, value := range itemMap {
|
||||
q = q.Value(key, value)
|
||||
}
|
||||
if _, err := q.Exec(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
results = append(results, item)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("batch create error: %w", err)
|
||||
}
|
||||
hookCtx.Result = results
|
||||
if err := h.hooks.Execute(AfterCreate, hookCtx); err != nil {
|
||||
return nil, fmt.Errorf("AfterCreate hook failed: %w", err)
|
||||
}
|
||||
return results, nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("data must be an object or array of objects")
|
||||
}
|
||||
}
|
||||
|
||||
// executeUpdate updates a record by ID.
|
||||
func (h *Handler) executeUpdate(ctx context.Context, schema, entity, id string, data interface{}) (interface{}, error) {
|
||||
model, err := h.registry.GetModelByEntity(schema, entity)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("model not found: %w", err)
|
||||
}
|
||||
|
||||
result, err := common.ValidateAndUnwrapModel(model)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid model: %w", err)
|
||||
}
|
||||
|
||||
model = result.Model
|
||||
tableName := h.getTableName(schema, entity, model)
|
||||
ctx = withRequestData(ctx, schema, entity, tableName, model, result.ModelPtr)
|
||||
|
||||
updates, ok := data.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("data must be an object")
|
||||
}
|
||||
|
||||
if id == "" {
|
||||
if idVal, exists := updates["id"]; exists {
|
||||
id = fmt.Sprintf("%v", idVal)
|
||||
}
|
||||
}
|
||||
if id == "" {
|
||||
return nil, fmt.Errorf("update requires an ID")
|
||||
}
|
||||
|
||||
pkName := reflection.GetPrimaryKeyName(model)
|
||||
|
||||
var updateResult interface{}
|
||||
err = h.db.RunInTransaction(ctx, func(tx common.Database) error {
|
||||
// Read existing record
|
||||
modelType := reflect.TypeOf(model)
|
||||
if modelType.Kind() == reflect.Ptr {
|
||||
modelType = modelType.Elem()
|
||||
}
|
||||
existingRecord := reflect.New(modelType).Interface()
|
||||
selectQuery := tx.NewSelect().Model(existingRecord).Column("*").
|
||||
Where(fmt.Sprintf("%s = ?", common.QuoteIdent(pkName)), id)
|
||||
|
||||
if err := selectQuery.ScanModel(ctx); err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return fmt.Errorf("no records found to update")
|
||||
}
|
||||
return fmt.Errorf("error fetching existing record: %w", err)
|
||||
}
|
||||
|
||||
// Convert to map
|
||||
existingMap := make(map[string]interface{})
|
||||
jsonData, err := json.Marshal(existingRecord)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error marshaling existing record: %w", err)
|
||||
}
|
||||
if err := json.Unmarshal(jsonData, &existingMap); err != nil {
|
||||
return fmt.Errorf("error unmarshaling existing record: %w", err)
|
||||
}
|
||||
|
||||
hookCtx := &HookContext{
|
||||
Context: ctx,
|
||||
Handler: h,
|
||||
Schema: schema,
|
||||
Entity: entity,
|
||||
Model: model,
|
||||
Operation: "update",
|
||||
ID: id,
|
||||
Data: updates,
|
||||
Tx: tx,
|
||||
}
|
||||
if err := h.hooks.Execute(BeforeUpdate, hookCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
if modifiedData, ok := hookCtx.Data.(map[string]interface{}); ok {
|
||||
updates = modifiedData
|
||||
}
|
||||
|
||||
// Merge non-nil, non-empty values
|
||||
for key, newValue := range updates {
|
||||
if newValue == nil {
|
||||
continue
|
||||
}
|
||||
if strVal, ok := newValue.(string); ok && strVal == "" {
|
||||
continue
|
||||
}
|
||||
existingMap[key] = newValue
|
||||
}
|
||||
|
||||
q := tx.NewUpdate().Table(tableName).SetMap(existingMap).
|
||||
Where(fmt.Sprintf("%s = ?", common.QuoteIdent(pkName)), id)
|
||||
res, err := q.Exec(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error updating record: %w", err)
|
||||
}
|
||||
if res.RowsAffected() == 0 {
|
||||
return fmt.Errorf("no records found to update")
|
||||
}
|
||||
|
||||
updateResult = existingMap
|
||||
hookCtx.Result = updateResult
|
||||
return h.hooks.Execute(AfterUpdate, hookCtx)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return updateResult, nil
|
||||
}
|
||||
|
||||
// executeDelete deletes a record by ID.
|
||||
func (h *Handler) executeDelete(ctx context.Context, schema, entity, id string) (interface{}, error) {
|
||||
if id == "" {
|
||||
return nil, fmt.Errorf("delete requires an ID")
|
||||
}
|
||||
|
||||
model, err := h.registry.GetModelByEntity(schema, entity)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("model not found: %w", err)
|
||||
}
|
||||
|
||||
result, err := common.ValidateAndUnwrapModel(model)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid model: %w", err)
|
||||
}
|
||||
|
||||
model = result.Model
|
||||
tableName := h.getTableName(schema, entity, model)
|
||||
ctx = withRequestData(ctx, schema, entity, tableName, model, result.ModelPtr)
|
||||
|
||||
pkName := reflection.GetPrimaryKeyName(model)
|
||||
|
||||
hookCtx := &HookContext{
|
||||
Context: ctx,
|
||||
Handler: h,
|
||||
Schema: schema,
|
||||
Entity: entity,
|
||||
Model: model,
|
||||
Operation: "delete",
|
||||
ID: id,
|
||||
Tx: h.db,
|
||||
}
|
||||
if err := h.hooks.Execute(BeforeHandle, hookCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := h.hooks.Execute(BeforeDelete, hookCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
modelType := reflect.TypeOf(model)
|
||||
if modelType.Kind() == reflect.Ptr {
|
||||
modelType = modelType.Elem()
|
||||
}
|
||||
|
||||
var recordToDelete interface{}
|
||||
|
||||
err = h.db.RunInTransaction(ctx, func(tx common.Database) error {
|
||||
record := reflect.New(modelType).Interface()
|
||||
selectQuery := tx.NewSelect().Model(record).
|
||||
Where(fmt.Sprintf("%s = ?", common.QuoteIdent(pkName)), id)
|
||||
if err := selectQuery.ScanModel(ctx); err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return fmt.Errorf("record not found")
|
||||
}
|
||||
return fmt.Errorf("error fetching record: %w", err)
|
||||
}
|
||||
|
||||
res, err := tx.NewDelete().Table(tableName).
|
||||
Where(fmt.Sprintf("%s = ?", common.QuoteIdent(pkName)), id).
|
||||
Exec(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("delete error: %w", err)
|
||||
}
|
||||
if res.RowsAffected() == 0 {
|
||||
return fmt.Errorf("record not found or already deleted")
|
||||
}
|
||||
|
||||
recordToDelete = record
|
||||
hookCtx.Tx = tx
|
||||
hookCtx.Result = record
|
||||
return h.hooks.Execute(AfterDelete, hookCtx)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logger.Info("[resolvemcp] Deleted record %s from %s.%s", id, schema, entity)
|
||||
return recordToDelete, nil
|
||||
}
|
||||
|
||||
// applyFilters applies all filters with OR grouping logic.
|
||||
func (h *Handler) applyFilters(query common.SelectQuery, filters []common.FilterOption) common.SelectQuery {
|
||||
if len(filters) == 0 {
|
||||
return query
|
||||
}
|
||||
|
||||
i := 0
|
||||
for i < len(filters) {
|
||||
startORGroup := i+1 < len(filters) && strings.EqualFold(filters[i+1].LogicOperator, "OR")
|
||||
|
||||
if startORGroup {
|
||||
orGroup := []common.FilterOption{filters[i]}
|
||||
j := i + 1
|
||||
for j < len(filters) && strings.EqualFold(filters[j].LogicOperator, "OR") {
|
||||
orGroup = append(orGroup, filters[j])
|
||||
j++
|
||||
}
|
||||
query = h.applyFilterGroup(query, orGroup)
|
||||
i = j
|
||||
} else {
|
||||
condition, args := h.buildFilterCondition(filters[i])
|
||||
if condition != "" {
|
||||
query = query.Where(condition, args...)
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
return query
|
||||
}
|
||||
|
||||
func (h *Handler) applyFilterGroup(query common.SelectQuery, filters []common.FilterOption) common.SelectQuery {
|
||||
var conditions []string
|
||||
var args []interface{}
|
||||
|
||||
for _, filter := range filters {
|
||||
condition, filterArgs := h.buildFilterCondition(filter)
|
||||
if condition != "" {
|
||||
conditions = append(conditions, condition)
|
||||
args = append(args, filterArgs...)
|
||||
}
|
||||
}
|
||||
|
||||
if len(conditions) == 0 {
|
||||
return query
|
||||
}
|
||||
if len(conditions) == 1 {
|
||||
return query.Where(conditions[0], args...)
|
||||
}
|
||||
return query.Where("("+strings.Join(conditions, " OR ")+")", args...)
|
||||
}
|
||||
|
||||
func (h *Handler) buildFilterCondition(filter common.FilterOption) (condition string, args []interface{}) {
|
||||
switch filter.Operator {
|
||||
case "eq", "=":
|
||||
return fmt.Sprintf("%s = ?", filter.Column), []interface{}{filter.Value}
|
||||
case "neq", "!=", "<>":
|
||||
return fmt.Sprintf("%s != ?", filter.Column), []interface{}{filter.Value}
|
||||
case "gt", ">":
|
||||
return fmt.Sprintf("%s > ?", filter.Column), []interface{}{filter.Value}
|
||||
case "gte", ">=":
|
||||
return fmt.Sprintf("%s >= ?", filter.Column), []interface{}{filter.Value}
|
||||
case "lt", "<":
|
||||
return fmt.Sprintf("%s < ?", filter.Column), []interface{}{filter.Value}
|
||||
case "lte", "<=":
|
||||
return fmt.Sprintf("%s <= ?", filter.Column), []interface{}{filter.Value}
|
||||
case "like":
|
||||
return fmt.Sprintf("%s LIKE ?", filter.Column), []interface{}{filter.Value}
|
||||
case "ilike":
|
||||
return fmt.Sprintf("%s ILIKE ?", filter.Column), []interface{}{filter.Value}
|
||||
case "in":
|
||||
condition, args := common.BuildInCondition(filter.Column, filter.Value)
|
||||
return condition, args
|
||||
case "is_null":
|
||||
return fmt.Sprintf("%s IS NULL", filter.Column), nil
|
||||
case "is_not_null":
|
||||
return fmt.Sprintf("%s IS NOT NULL", filter.Column), nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (h *Handler) applyPreloads(model interface{}, query common.SelectQuery, preloads []common.PreloadOption) (common.SelectQuery, error) {
|
||||
for i := range preloads {
|
||||
preload := &preloads[i]
|
||||
if preload.Relation == "" {
|
||||
continue
|
||||
}
|
||||
query = query.PreloadRelation(preload.Relation)
|
||||
}
|
||||
return query, nil
|
||||
}
|
||||
113
pkg/resolvemcp/hooks.go
Normal file
113
pkg/resolvemcp/hooks.go
Normal file
@@ -0,0 +1,113 @@
|
||||
package resolvemcp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
)
|
||||
|
||||
// HookType defines the type of hook to execute
|
||||
type HookType string
|
||||
|
||||
const (
|
||||
// BeforeHandle fires after model resolution, before operation dispatch.
|
||||
BeforeHandle HookType = "before_handle"
|
||||
|
||||
BeforeRead HookType = "before_read"
|
||||
AfterRead HookType = "after_read"
|
||||
|
||||
BeforeCreate HookType = "before_create"
|
||||
AfterCreate HookType = "after_create"
|
||||
|
||||
BeforeUpdate HookType = "before_update"
|
||||
AfterUpdate HookType = "after_update"
|
||||
|
||||
BeforeDelete HookType = "before_delete"
|
||||
AfterDelete HookType = "after_delete"
|
||||
)
|
||||
|
||||
// HookContext contains all the data available to a hook
|
||||
type HookContext struct {
|
||||
Context context.Context
|
||||
Handler *Handler
|
||||
Schema string
|
||||
Entity string
|
||||
Model interface{}
|
||||
Options common.RequestOptions
|
||||
Operation string
|
||||
ID string
|
||||
Data interface{}
|
||||
Result interface{}
|
||||
Error error
|
||||
Query common.SelectQuery
|
||||
Abort bool
|
||||
AbortMessage string
|
||||
AbortCode int
|
||||
Tx common.Database
|
||||
}
|
||||
|
||||
// HookFunc is the signature for hook functions
|
||||
type HookFunc func(*HookContext) error
|
||||
|
||||
// HookRegistry manages all registered hooks
|
||||
type HookRegistry struct {
|
||||
hooks map[HookType][]HookFunc
|
||||
}
|
||||
|
||||
func NewHookRegistry() *HookRegistry {
|
||||
return &HookRegistry{
|
||||
hooks: make(map[HookType][]HookFunc),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *HookRegistry) Register(hookType HookType, hook HookFunc) {
|
||||
if r.hooks == nil {
|
||||
r.hooks = make(map[HookType][]HookFunc)
|
||||
}
|
||||
r.hooks[hookType] = append(r.hooks[hookType], hook)
|
||||
logger.Info("Registered resolvemcp hook for %s (total: %d)", hookType, len(r.hooks[hookType]))
|
||||
}
|
||||
|
||||
func (r *HookRegistry) RegisterMultiple(hookTypes []HookType, hook HookFunc) {
|
||||
for _, hookType := range hookTypes {
|
||||
r.Register(hookType, hook)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *HookRegistry) Execute(hookType HookType, ctx *HookContext) error {
|
||||
hooks, exists := r.hooks[hookType]
|
||||
if !exists || len(hooks) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.Debug("Executing %d resolvemcp hook(s) for %s", len(hooks), hookType)
|
||||
|
||||
for i, hook := range hooks {
|
||||
if err := hook(ctx); err != nil {
|
||||
logger.Error("resolvemcp hook %d for %s failed: %v", i+1, hookType, err)
|
||||
return fmt.Errorf("hook execution failed: %w", err)
|
||||
}
|
||||
|
||||
if ctx.Abort {
|
||||
logger.Warn("resolvemcp hook %d for %s requested abort: %s", i+1, hookType, ctx.AbortMessage)
|
||||
return fmt.Errorf("operation aborted by hook: %s", ctx.AbortMessage)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *HookRegistry) Clear(hookType HookType) {
|
||||
delete(r.hooks, hookType)
|
||||
}
|
||||
|
||||
func (r *HookRegistry) ClearAll() {
|
||||
r.hooks = make(map[HookType][]HookFunc)
|
||||
}
|
||||
|
||||
func (r *HookRegistry) HasHooks(hookType HookType) bool {
|
||||
hooks, exists := r.hooks[hookType]
|
||||
return exists && len(hooks) > 0
|
||||
}
|
||||
100
pkg/resolvemcp/resolvemcp.go
Normal file
100
pkg/resolvemcp/resolvemcp.go
Normal file
@@ -0,0 +1,100 @@
|
||||
// Package resolvemcp exposes registered database models as Model Context Protocol (MCP) tools
|
||||
// and resources over HTTP/SSE transport.
|
||||
//
|
||||
// It mirrors the resolvespec package patterns:
|
||||
// - Same model registration API
|
||||
// - Same filter, sort, cursor pagination, preload options
|
||||
// - Same lifecycle hook system
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// handler := resolvemcp.NewHandlerWithGORM(db, resolvemcp.Config{BaseURL: "http://localhost:8080"})
|
||||
// handler.RegisterModel("public", "users", &User{})
|
||||
//
|
||||
// r := mux.NewRouter()
|
||||
// resolvemcp.SetupMuxRoutes(r, handler)
|
||||
package resolvemcp
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/uptrace/bun"
|
||||
bunrouter "github.com/uptrace/bunrouter"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common/adapters/database"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/modelregistry"
|
||||
)
|
||||
|
||||
// Config holds configuration for the resolvemcp handler.
|
||||
type Config struct {
|
||||
// BaseURL is the public-facing base URL of the server (e.g. "http://localhost:8080").
|
||||
// It is sent to MCP clients during the SSE handshake so they know where to POST messages.
|
||||
BaseURL string
|
||||
|
||||
// BasePath is the URL path prefix where the MCP endpoints are mounted (e.g. "/mcp").
|
||||
// If empty, the path is detected from each incoming request automatically.
|
||||
BasePath string
|
||||
}
|
||||
|
||||
// NewHandlerWithGORM creates a Handler backed by a GORM database connection.
|
||||
func NewHandlerWithGORM(db *gorm.DB, cfg Config) *Handler {
|
||||
return NewHandler(database.NewGormAdapter(db), modelregistry.NewModelRegistry(), cfg)
|
||||
}
|
||||
|
||||
// NewHandlerWithBun creates a Handler backed by a Bun database connection.
|
||||
func NewHandlerWithBun(db *bun.DB, cfg Config) *Handler {
|
||||
return NewHandler(database.NewBunAdapter(db), modelregistry.NewModelRegistry(), cfg)
|
||||
}
|
||||
|
||||
// NewHandlerWithDB creates a Handler using an existing common.Database and a new registry.
|
||||
func NewHandlerWithDB(db common.Database, cfg Config) *Handler {
|
||||
return NewHandler(db, modelregistry.NewModelRegistry(), cfg)
|
||||
}
|
||||
|
||||
// SetupMuxRoutes mounts the MCP HTTP/SSE endpoints on the given Gorilla Mux router
|
||||
// using the base path from Config.BasePath (falls back to "/mcp" if empty).
|
||||
//
|
||||
// Two routes are registered:
|
||||
// - GET {basePath}/sse — SSE connection endpoint (client subscribes here)
|
||||
// - POST {basePath}/message — JSON-RPC message endpoint (client sends requests here)
|
||||
//
|
||||
// To protect these routes with authentication, wrap the mux router or apply middleware
|
||||
// before calling SetupMuxRoutes.
|
||||
func SetupMuxRoutes(muxRouter *mux.Router, handler *Handler) {
|
||||
basePath := handler.config.BasePath
|
||||
h := handler.SSEServer()
|
||||
|
||||
muxRouter.Handle(basePath+"/sse", h).Methods("GET", "OPTIONS")
|
||||
muxRouter.Handle(basePath+"/message", h).Methods("POST", "OPTIONS")
|
||||
|
||||
// Convenience: also expose the full SSE server at basePath for clients that
|
||||
// use ServeHTTP directly (e.g. net/http default mux).
|
||||
muxRouter.PathPrefix(basePath).Handler(http.StripPrefix(basePath, h))
|
||||
}
|
||||
|
||||
// SetupBunRouterRoutes mounts the MCP HTTP/SSE endpoints on a bunrouter router
|
||||
// using the base path from Config.BasePath.
|
||||
//
|
||||
// Two routes are registered:
|
||||
// - GET {basePath}/sse — SSE connection endpoint
|
||||
// - POST {basePath}/message — JSON-RPC message endpoint
|
||||
func SetupBunRouterRoutes(router *bunrouter.Router, handler *Handler) {
|
||||
basePath := handler.config.BasePath
|
||||
h := handler.SSEServer()
|
||||
|
||||
router.GET(basePath+"/sse", bunrouter.HTTPHandler(h))
|
||||
router.POST(basePath+"/message", bunrouter.HTTPHandler(h))
|
||||
}
|
||||
|
||||
// NewSSEServer returns an http.Handler that serves MCP over SSE.
|
||||
// If Config.BasePath is set it is used directly; otherwise the base path is
|
||||
// detected from each incoming request (by stripping the "/sse" or "/message" suffix).
|
||||
//
|
||||
// h := resolvemcp.NewSSEServer(handler)
|
||||
// http.Handle("/api/mcp/", h)
|
||||
func NewSSEServer(handler *Handler) http.Handler {
|
||||
return handler.SSEServer()
|
||||
}
|
||||
115
pkg/resolvemcp/security_hooks.go
Normal file
115
pkg/resolvemcp/security_hooks.go
Normal file
@@ -0,0 +1,115 @@
|
||||
package resolvemcp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/security"
|
||||
)
|
||||
|
||||
// RegisterSecurityHooks wires the security package's access-control layer into the
|
||||
// resolvemcp handler. Call it once after creating the handler, before registering models.
|
||||
//
|
||||
// The following controls are applied:
|
||||
// - Per-entity operation rules (CanRead, CanCreate, CanUpdate, CanDelete, CanPublic*)
|
||||
// stored via RegisterModelWithRules / SetModelRules.
|
||||
// - Row-level security: WHERE clause injected per user from the SecurityList provider.
|
||||
// - Column-level security: sensitive columns masked/hidden in read results.
|
||||
// - Audit logging after each read.
|
||||
func RegisterSecurityHooks(handler *Handler, securityList *security.SecurityList) {
|
||||
// BeforeHandle: enforce model-level operation rules (auth check).
|
||||
handler.Hooks().Register(BeforeHandle, func(hookCtx *HookContext) error {
|
||||
if err := security.CheckModelAuthAllowed(newSecurityContext(hookCtx), hookCtx.Operation); err != nil {
|
||||
hookCtx.Abort = true
|
||||
hookCtx.AbortMessage = err.Error()
|
||||
hookCtx.AbortCode = http.StatusUnauthorized
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// BeforeRead (1st): load RLS + CLS rules from the provider into SecurityList.
|
||||
handler.Hooks().Register(BeforeRead, func(hookCtx *HookContext) error {
|
||||
return security.LoadSecurityRules(newSecurityContext(hookCtx), securityList)
|
||||
})
|
||||
|
||||
// BeforeRead (2nd): apply row-level security — injects a WHERE clause into the query.
|
||||
// resolvemcp has no separate BeforeScan hook; the query is available in BeforeRead.
|
||||
handler.Hooks().Register(BeforeRead, func(hookCtx *HookContext) error {
|
||||
return security.ApplyRowSecurity(newSecurityContext(hookCtx), securityList)
|
||||
})
|
||||
|
||||
// AfterRead (1st): apply column-level security — mask/hide columns in the result.
|
||||
handler.Hooks().Register(AfterRead, func(hookCtx *HookContext) error {
|
||||
return security.ApplyColumnSecurity(newSecurityContext(hookCtx), securityList)
|
||||
})
|
||||
|
||||
// AfterRead (2nd): audit log.
|
||||
handler.Hooks().Register(AfterRead, func(hookCtx *HookContext) error {
|
||||
return security.LogDataAccess(newSecurityContext(hookCtx))
|
||||
})
|
||||
|
||||
// BeforeUpdate: enforce CanUpdate rule.
|
||||
handler.Hooks().Register(BeforeUpdate, func(hookCtx *HookContext) error {
|
||||
return security.CheckModelUpdateAllowed(newSecurityContext(hookCtx))
|
||||
})
|
||||
|
||||
// BeforeDelete: enforce CanDelete rule.
|
||||
handler.Hooks().Register(BeforeDelete, func(hookCtx *HookContext) error {
|
||||
return security.CheckModelDeleteAllowed(newSecurityContext(hookCtx))
|
||||
})
|
||||
|
||||
logger.Info("Security hooks registered for resolvemcp handler")
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// securityContext — adapts resolvemcp.HookContext to security.SecurityContext
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
type securityContext struct {
|
||||
ctx *HookContext
|
||||
}
|
||||
|
||||
func newSecurityContext(ctx *HookContext) security.SecurityContext {
|
||||
return &securityContext{ctx: ctx}
|
||||
}
|
||||
|
||||
func (s *securityContext) GetContext() context.Context {
|
||||
return s.ctx.Context
|
||||
}
|
||||
|
||||
func (s *securityContext) GetUserID() (int, bool) {
|
||||
return security.GetUserID(s.ctx.Context)
|
||||
}
|
||||
|
||||
func (s *securityContext) GetSchema() string {
|
||||
return s.ctx.Schema
|
||||
}
|
||||
|
||||
func (s *securityContext) GetEntity() string {
|
||||
return s.ctx.Entity
|
||||
}
|
||||
|
||||
func (s *securityContext) GetModel() interface{} {
|
||||
return s.ctx.Model
|
||||
}
|
||||
|
||||
func (s *securityContext) GetQuery() interface{} {
|
||||
return s.ctx.Query
|
||||
}
|
||||
|
||||
func (s *securityContext) SetQuery(query interface{}) {
|
||||
if q, ok := query.(common.SelectQuery); ok {
|
||||
s.ctx.Query = q
|
||||
}
|
||||
}
|
||||
|
||||
func (s *securityContext) GetResult() interface{} {
|
||||
return s.ctx.Result
|
||||
}
|
||||
|
||||
func (s *securityContext) SetResult(result interface{}) {
|
||||
s.ctx.Result = result
|
||||
}
|
||||
692
pkg/resolvemcp/tools.go
Normal file
692
pkg/resolvemcp/tools.go
Normal file
@@ -0,0 +1,692 @@
|
||||
package resolvemcp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/mark3labs/mcp-go/mcp"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/reflection"
|
||||
)
|
||||
|
||||
// toolName builds the MCP tool name for a given operation and model.
|
||||
func toolName(operation, schema, entity string) string {
|
||||
if schema == "" {
|
||||
return fmt.Sprintf("%s_%s", operation, entity)
|
||||
}
|
||||
return fmt.Sprintf("%s_%s_%s", operation, schema, entity)
|
||||
}
|
||||
|
||||
// registerModelTools registers the four CRUD tools and resource for a model.
|
||||
func registerModelTools(h *Handler, schema, entity string, model interface{}) {
|
||||
info := buildModelInfo(schema, entity, model)
|
||||
registerReadTool(h, schema, entity, info)
|
||||
registerCreateTool(h, schema, entity, info)
|
||||
registerUpdateTool(h, schema, entity, info)
|
||||
registerDeleteTool(h, schema, entity, info)
|
||||
registerModelResource(h, schema, entity, info)
|
||||
|
||||
logger.Info("[resolvemcp] Registered MCP tools for %s", info.fullName)
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Model introspection
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// modelInfo holds pre-computed metadata for a model used in tool descriptions.
|
||||
type modelInfo struct {
|
||||
fullName string // e.g. "public.users"
|
||||
pkName string // e.g. "id"
|
||||
columns []columnInfo
|
||||
relationNames []string
|
||||
schemaDoc string // formatted multi-line schema listing
|
||||
}
|
||||
|
||||
type columnInfo struct {
|
||||
jsonName string
|
||||
sqlName string
|
||||
goType string
|
||||
sqlType string
|
||||
isPrimary bool
|
||||
isUnique bool
|
||||
isFK bool
|
||||
nullable bool
|
||||
}
|
||||
|
||||
// buildModelInfo extracts column metadata and pre-builds the schema documentation string.
|
||||
func buildModelInfo(schema, entity string, model interface{}) modelInfo {
|
||||
info := modelInfo{
|
||||
fullName: buildModelName(schema, entity),
|
||||
pkName: reflection.GetPrimaryKeyName(model),
|
||||
}
|
||||
|
||||
// Unwrap to base struct type
|
||||
modelType := reflect.TypeOf(model)
|
||||
for modelType != nil && (modelType.Kind() == reflect.Ptr || modelType.Kind() == reflect.Slice) {
|
||||
modelType = modelType.Elem()
|
||||
}
|
||||
if modelType == nil || modelType.Kind() != reflect.Struct {
|
||||
return info
|
||||
}
|
||||
|
||||
details := reflection.GetModelColumnDetail(reflect.New(modelType).Elem())
|
||||
|
||||
for _, d := range details {
|
||||
// Derive the JSON name from the struct field
|
||||
jsonName := fieldJSONName(modelType, d.Name)
|
||||
if jsonName == "" || jsonName == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip relation fields (slice or user-defined struct that isn't time.Time).
|
||||
fieldType, found := modelType.FieldByName(d.Name)
|
||||
if found {
|
||||
ft := fieldType.Type
|
||||
if ft.Kind() == reflect.Ptr {
|
||||
ft = ft.Elem()
|
||||
}
|
||||
isUserStruct := ft.Kind() == reflect.Struct && ft.Name() != "Time" && ft.PkgPath() != ""
|
||||
if ft.Kind() == reflect.Slice || isUserStruct {
|
||||
info.relationNames = append(info.relationNames, jsonName)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
sqlName := d.SQLName
|
||||
if sqlName == "" {
|
||||
sqlName = jsonName
|
||||
}
|
||||
|
||||
// Derive Go type name, unwrapping pointer if needed.
|
||||
goType := d.DataType
|
||||
if goType == "" && found {
|
||||
ft := fieldType.Type
|
||||
for ft.Kind() == reflect.Ptr {
|
||||
ft = ft.Elem()
|
||||
}
|
||||
goType = ft.Name()
|
||||
}
|
||||
|
||||
// isPrimary: use both the GORM-tag detection and a name comparison against
|
||||
// the known primary key (handles camelCase "primaryKey" tags correctly).
|
||||
isPrimary := d.SQLKey == "primary_key" ||
|
||||
(info.pkName != "" && (sqlName == info.pkName || jsonName == info.pkName))
|
||||
|
||||
ci := columnInfo{
|
||||
jsonName: jsonName,
|
||||
sqlName: sqlName,
|
||||
goType: goType,
|
||||
sqlType: d.SQLDataType,
|
||||
isPrimary: isPrimary,
|
||||
isUnique: d.SQLKey == "unique" || d.SQLKey == "uniqueindex",
|
||||
isFK: d.SQLKey == "foreign_key",
|
||||
nullable: d.Nullable,
|
||||
}
|
||||
info.columns = append(info.columns, ci)
|
||||
}
|
||||
|
||||
info.schemaDoc = buildSchemaDoc(info)
|
||||
return info
|
||||
}
|
||||
|
||||
// fieldJSONName returns the JSON tag name for a struct field, falling back to the field name.
|
||||
func fieldJSONName(modelType reflect.Type, fieldName string) string {
|
||||
field, ok := modelType.FieldByName(fieldName)
|
||||
if !ok {
|
||||
return fieldName
|
||||
}
|
||||
tag := field.Tag.Get("json")
|
||||
if tag == "" {
|
||||
return fieldName
|
||||
}
|
||||
parts := strings.SplitN(tag, ",", 2)
|
||||
if parts[0] == "" {
|
||||
return fieldName
|
||||
}
|
||||
return parts[0]
|
||||
}
|
||||
|
||||
// buildSchemaDoc builds a human-readable column listing for inclusion in tool descriptions.
|
||||
func buildSchemaDoc(info modelInfo) string {
|
||||
if len(info.columns) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
var sb strings.Builder
|
||||
sb.WriteString("Columns:\n")
|
||||
for _, c := range info.columns {
|
||||
line := fmt.Sprintf(" • %s", c.jsonName)
|
||||
|
||||
typeDesc := c.goType
|
||||
if c.sqlType != "" {
|
||||
typeDesc = c.sqlType
|
||||
}
|
||||
if typeDesc != "" {
|
||||
line += fmt.Sprintf(" (%s)", typeDesc)
|
||||
}
|
||||
|
||||
var flags []string
|
||||
if c.isPrimary {
|
||||
flags = append(flags, "primary key")
|
||||
}
|
||||
if c.isUnique {
|
||||
flags = append(flags, "unique")
|
||||
}
|
||||
if c.isFK {
|
||||
flags = append(flags, "foreign key")
|
||||
}
|
||||
if !c.nullable && !c.isPrimary {
|
||||
flags = append(flags, "not null")
|
||||
} else if c.nullable {
|
||||
flags = append(flags, "nullable")
|
||||
}
|
||||
if len(flags) > 0 {
|
||||
line += " — " + strings.Join(flags, ", ")
|
||||
}
|
||||
|
||||
sb.WriteString(line + "\n")
|
||||
}
|
||||
|
||||
if len(info.relationNames) > 0 {
|
||||
sb.WriteString("Relations (preloadable): " + strings.Join(info.relationNames, ", ") + "\n")
|
||||
}
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// columnNameList returns a comma-separated list of JSON column names (for descriptions).
|
||||
func columnNameList(cols []columnInfo) string {
|
||||
names := make([]string, len(cols))
|
||||
for i, c := range cols {
|
||||
names[i] = c.jsonName
|
||||
}
|
||||
return strings.Join(names, ", ")
|
||||
}
|
||||
|
||||
// writableColumnNames returns JSON names for all non-primary-key columns.
|
||||
func writableColumnNames(cols []columnInfo) []string {
|
||||
var names []string
|
||||
for _, c := range cols {
|
||||
if !c.isPrimary {
|
||||
names = append(names, c.jsonName)
|
||||
}
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Read tool
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func registerReadTool(h *Handler, schema, entity string, info modelInfo) {
|
||||
name := toolName("read", schema, entity)
|
||||
|
||||
var descParts []string
|
||||
descParts = append(descParts, fmt.Sprintf("Read records from the '%s' database table.", info.fullName))
|
||||
if info.pkName != "" {
|
||||
descParts = append(descParts, fmt.Sprintf("Primary key: '%s'. Pass it via 'id' to fetch a single record.", info.pkName))
|
||||
}
|
||||
if info.schemaDoc != "" {
|
||||
descParts = append(descParts, info.schemaDoc)
|
||||
}
|
||||
descParts = append(descParts,
|
||||
"Pagination: use 'limit'/'offset' for offset-based paging, or 'cursor_forward'/'cursor_backward' (pass the primary key value of the last/first record on the current page) for cursor-based paging.",
|
||||
"Filtering: each filter object requires 'column' (JSON field name) and 'operator'. Supported operators: = != > < >= <= like ilike in is_null is_not_null. Combine with 'logic_operator': AND (default) or OR.",
|
||||
"Sorting: each sort object requires 'column' and 'direction' (asc or desc).",
|
||||
)
|
||||
if len(info.relationNames) > 0 {
|
||||
descParts = append(descParts, fmt.Sprintf("Preloadable relations: %s. Pass relation name in 'preloads'.", strings.Join(info.relationNames, ", ")))
|
||||
}
|
||||
|
||||
description := strings.Join(descParts, "\n\n")
|
||||
|
||||
filterDesc := `Array of filter objects. Example: [{"column":"status","operator":"=","value":"active"},{"column":"age","operator":">","value":18,"logic_operator":"AND"}]`
|
||||
if len(info.columns) > 0 {
|
||||
filterDesc += fmt.Sprintf(" Available columns: %s.", columnNameList(info.columns))
|
||||
}
|
||||
|
||||
sortDesc := `Array of sort objects. Example: [{"column":"created_at","direction":"desc"}]`
|
||||
if len(info.columns) > 0 {
|
||||
sortDesc += fmt.Sprintf(" Available columns: %s.", columnNameList(info.columns))
|
||||
}
|
||||
|
||||
tool := mcp.NewTool(name,
|
||||
mcp.WithDescription(description),
|
||||
mcp.WithString("id",
|
||||
mcp.Description(fmt.Sprintf("Primary key (%s) of a single record to fetch. Omit to return multiple records.", info.pkName)),
|
||||
),
|
||||
mcp.WithNumber("limit",
|
||||
mcp.Description("Maximum number of records to return per page. Recommended: 10–100."),
|
||||
),
|
||||
mcp.WithNumber("offset",
|
||||
mcp.Description("Number of records to skip (for offset-based pagination). Use with 'limit'."),
|
||||
),
|
||||
mcp.WithString("cursor_forward",
|
||||
mcp.Description(fmt.Sprintf("Cursor for the next page: pass the '%s' value of the last record on the current page. Requires 'sort' to be set.", info.pkName)),
|
||||
),
|
||||
mcp.WithString("cursor_backward",
|
||||
mcp.Description(fmt.Sprintf("Cursor for the previous page: pass the '%s' value of the first record on the current page. Requires 'sort' to be set.", info.pkName)),
|
||||
),
|
||||
mcp.WithArray("columns",
|
||||
mcp.Description(fmt.Sprintf("Columns to include in the result. Omit to return all columns. Available: %s.", columnNameList(info.columns))),
|
||||
),
|
||||
mcp.WithArray("omit_columns",
|
||||
mcp.Description(fmt.Sprintf("Columns to exclude from the result. Available: %s.", columnNameList(info.columns))),
|
||||
),
|
||||
mcp.WithArray("filters",
|
||||
mcp.Description(filterDesc),
|
||||
),
|
||||
mcp.WithArray("sort",
|
||||
mcp.Description(sortDesc),
|
||||
),
|
||||
mcp.WithArray("preloads",
|
||||
mcp.Description(buildPreloadDesc(info)),
|
||||
),
|
||||
)
|
||||
|
||||
h.mcpServer.AddTool(tool, func(ctx context.Context, req mcp.CallToolRequest) (*mcp.CallToolResult, error) {
|
||||
args := req.GetArguments()
|
||||
id, _ := args["id"].(string)
|
||||
options := parseRequestOptions(args)
|
||||
|
||||
data, metadata, err := h.executeRead(ctx, schema, entity, id, options)
|
||||
if err != nil {
|
||||
return mcp.NewToolResultError(err.Error()), nil
|
||||
}
|
||||
|
||||
return marshalResult(map[string]interface{}{
|
||||
"success": true,
|
||||
"data": data,
|
||||
"metadata": metadata,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func buildPreloadDesc(info modelInfo) string {
|
||||
if len(info.relationNames) == 0 {
|
||||
return `Array of relation preload objects. Each object: {"relation":"RelationName"}. No relations defined on this model.`
|
||||
}
|
||||
return fmt.Sprintf(
|
||||
`Array of relation preload objects. Each object: {"relation":"RelationName","columns":["col1","col2"]}. Available relations: %s.`,
|
||||
strings.Join(info.relationNames, ", "),
|
||||
)
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Create tool
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func registerCreateTool(h *Handler, schema, entity string, info modelInfo) {
|
||||
name := toolName("create", schema, entity)
|
||||
|
||||
writable := writableColumnNames(info.columns)
|
||||
|
||||
var descParts []string
|
||||
descParts = append(descParts, fmt.Sprintf("Create one or more new records in the '%s' table.", info.fullName))
|
||||
if len(writable) > 0 {
|
||||
descParts = append(descParts, fmt.Sprintf("Writable fields: %s.", strings.Join(writable, ", ")))
|
||||
}
|
||||
if info.pkName != "" {
|
||||
descParts = append(descParts, fmt.Sprintf("The primary key ('%s') is typically auto-generated — omit it unless you need to supply it explicitly.", info.pkName))
|
||||
}
|
||||
descParts = append(descParts,
|
||||
"Pass a single JSON object to 'data' to create one record. Pass an array of objects to create multiple records in a single transaction (all succeed or all fail).",
|
||||
)
|
||||
if info.schemaDoc != "" {
|
||||
descParts = append(descParts, info.schemaDoc)
|
||||
}
|
||||
|
||||
description := strings.Join(descParts, "\n\n")
|
||||
|
||||
dataDesc := "Record fields to create."
|
||||
if len(writable) > 0 {
|
||||
dataDesc += fmt.Sprintf(" Writable fields: %s.", strings.Join(writable, ", "))
|
||||
}
|
||||
dataDesc += " Pass a single object or an array of objects."
|
||||
|
||||
tool := mcp.NewTool(name,
|
||||
mcp.WithDescription(description),
|
||||
mcp.WithObject("data",
|
||||
mcp.Description(dataDesc),
|
||||
mcp.Required(),
|
||||
),
|
||||
)
|
||||
|
||||
h.mcpServer.AddTool(tool, func(ctx context.Context, req mcp.CallToolRequest) (*mcp.CallToolResult, error) {
|
||||
args := req.GetArguments()
|
||||
data, ok := args["data"]
|
||||
if !ok {
|
||||
return mcp.NewToolResultError("missing required argument: data"), nil
|
||||
}
|
||||
|
||||
result, err := h.executeCreate(ctx, schema, entity, data)
|
||||
if err != nil {
|
||||
return mcp.NewToolResultError(err.Error()), nil
|
||||
}
|
||||
|
||||
return marshalResult(map[string]interface{}{
|
||||
"success": true,
|
||||
"data": result,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Update tool
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func registerUpdateTool(h *Handler, schema, entity string, info modelInfo) {
|
||||
name := toolName("update", schema, entity)
|
||||
|
||||
writable := writableColumnNames(info.columns)
|
||||
|
||||
var descParts []string
|
||||
descParts = append(descParts, fmt.Sprintf("Update an existing record in the '%s' table.", info.fullName))
|
||||
if info.pkName != "" {
|
||||
descParts = append(descParts, fmt.Sprintf("Identify the record by its primary key ('%s') via the 'id' argument or by including '%s' inside 'data'.", info.pkName, info.pkName))
|
||||
}
|
||||
if len(writable) > 0 {
|
||||
descParts = append(descParts, fmt.Sprintf("Updatable fields: %s.", strings.Join(writable, ", ")))
|
||||
}
|
||||
descParts = append(descParts,
|
||||
"Only non-null, non-empty fields in 'data' are applied — existing values are preserved for fields you omit. Returns the merged record as stored.",
|
||||
)
|
||||
if info.schemaDoc != "" {
|
||||
descParts = append(descParts, info.schemaDoc)
|
||||
}
|
||||
|
||||
description := strings.Join(descParts, "\n\n")
|
||||
|
||||
idDesc := fmt.Sprintf("Primary key ('%s') of the record to update. Can also be included inside 'data'.", info.pkName)
|
||||
|
||||
dataDesc := "Fields to update (non-null, non-empty values are merged into the existing record)."
|
||||
if len(writable) > 0 {
|
||||
dataDesc += fmt.Sprintf(" Updatable fields: %s.", strings.Join(writable, ", "))
|
||||
}
|
||||
|
||||
tool := mcp.NewTool(name,
|
||||
mcp.WithDescription(description),
|
||||
mcp.WithString("id",
|
||||
mcp.Description(idDesc),
|
||||
),
|
||||
mcp.WithObject("data",
|
||||
mcp.Description(dataDesc),
|
||||
mcp.Required(),
|
||||
),
|
||||
)
|
||||
|
||||
h.mcpServer.AddTool(tool, func(ctx context.Context, req mcp.CallToolRequest) (*mcp.CallToolResult, error) {
|
||||
args := req.GetArguments()
|
||||
id, _ := args["id"].(string)
|
||||
|
||||
data, ok := args["data"]
|
||||
if !ok {
|
||||
return mcp.NewToolResultError("missing required argument: data"), nil
|
||||
}
|
||||
dataMap, ok := data.(map[string]interface{})
|
||||
if !ok {
|
||||
return mcp.NewToolResultError("data must be an object"), nil
|
||||
}
|
||||
|
||||
result, err := h.executeUpdate(ctx, schema, entity, id, dataMap)
|
||||
if err != nil {
|
||||
return mcp.NewToolResultError(err.Error()), nil
|
||||
}
|
||||
|
||||
return marshalResult(map[string]interface{}{
|
||||
"success": true,
|
||||
"data": result,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Delete tool
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func registerDeleteTool(h *Handler, schema, entity string, info modelInfo) {
|
||||
name := toolName("delete", schema, entity)
|
||||
|
||||
descParts := []string{
|
||||
fmt.Sprintf("Delete a record from the '%s' table by its primary key.", info.fullName),
|
||||
}
|
||||
if info.pkName != "" {
|
||||
descParts = append(descParts, fmt.Sprintf("Pass the '%s' value of the record to delete via the 'id' argument.", info.pkName))
|
||||
}
|
||||
descParts = append(descParts, "Returns the deleted record. This operation is irreversible.")
|
||||
|
||||
description := strings.Join(descParts, " ")
|
||||
|
||||
tool := mcp.NewTool(name,
|
||||
mcp.WithDescription(description),
|
||||
mcp.WithString("id",
|
||||
mcp.Description(fmt.Sprintf("Primary key ('%s') of the record to delete.", info.pkName)),
|
||||
mcp.Required(),
|
||||
),
|
||||
)
|
||||
|
||||
h.mcpServer.AddTool(tool, func(ctx context.Context, req mcp.CallToolRequest) (*mcp.CallToolResult, error) {
|
||||
args := req.GetArguments()
|
||||
id, _ := args["id"].(string)
|
||||
|
||||
result, err := h.executeDelete(ctx, schema, entity, id)
|
||||
if err != nil {
|
||||
return mcp.NewToolResultError(err.Error()), nil
|
||||
}
|
||||
|
||||
return marshalResult(map[string]interface{}{
|
||||
"success": true,
|
||||
"data": result,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Resource registration
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func registerModelResource(h *Handler, schema, entity string, info modelInfo) {
|
||||
resourceURI := info.fullName
|
||||
|
||||
var resourceDesc strings.Builder
|
||||
fmt.Fprintf(&resourceDesc, "Database table: %s", info.fullName)
|
||||
if info.pkName != "" {
|
||||
fmt.Fprintf(&resourceDesc, " (primary key: %s)", info.pkName)
|
||||
}
|
||||
if info.schemaDoc != "" {
|
||||
resourceDesc.WriteString("\n\n")
|
||||
resourceDesc.WriteString(info.schemaDoc)
|
||||
}
|
||||
|
||||
resource := mcp.NewResource(
|
||||
resourceURI,
|
||||
entity,
|
||||
mcp.WithResourceDescription(resourceDesc.String()),
|
||||
mcp.WithMIMEType("application/json"),
|
||||
)
|
||||
|
||||
h.mcpServer.AddResource(resource, func(ctx context.Context, req mcp.ReadResourceRequest) ([]mcp.ResourceContents, error) {
|
||||
limit := 100
|
||||
options := common.RequestOptions{Limit: &limit}
|
||||
|
||||
data, metadata, err := h.executeRead(ctx, schema, entity, "", options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
payload := map[string]interface{}{
|
||||
"data": data,
|
||||
"metadata": metadata,
|
||||
}
|
||||
jsonBytes, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error marshaling resource: %w", err)
|
||||
}
|
||||
|
||||
return []mcp.ResourceContents{
|
||||
mcp.TextResourceContents{
|
||||
URI: req.Params.URI,
|
||||
MIMEType: "application/json",
|
||||
Text: string(jsonBytes),
|
||||
},
|
||||
}, nil
|
||||
})
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Argument parsing helpers
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// parseRequestOptions converts raw MCP tool arguments into common.RequestOptions.
|
||||
func parseRequestOptions(args map[string]interface{}) common.RequestOptions {
|
||||
options := common.RequestOptions{}
|
||||
|
||||
if v, ok := args["limit"]; ok {
|
||||
switch n := v.(type) {
|
||||
case float64:
|
||||
limit := int(n)
|
||||
options.Limit = &limit
|
||||
case int:
|
||||
options.Limit = &n
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := args["offset"]; ok {
|
||||
switch n := v.(type) {
|
||||
case float64:
|
||||
offset := int(n)
|
||||
options.Offset = &offset
|
||||
case int:
|
||||
options.Offset = &n
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := args["cursor_forward"].(string); ok {
|
||||
options.CursorForward = v
|
||||
}
|
||||
if v, ok := args["cursor_backward"].(string); ok {
|
||||
options.CursorBackward = v
|
||||
}
|
||||
|
||||
options.Columns = parseStringArray(args["columns"])
|
||||
options.OmitColumns = parseStringArray(args["omit_columns"])
|
||||
options.Filters = parseFilters(args["filters"])
|
||||
options.Sort = parseSortOptions(args["sort"])
|
||||
options.Preload = parsePreloadOptions(args["preloads"])
|
||||
|
||||
return options
|
||||
}
|
||||
|
||||
func parseStringArray(raw interface{}) []string {
|
||||
if raw == nil {
|
||||
return nil
|
||||
}
|
||||
items, ok := raw.([]interface{})
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
result := make([]string, 0, len(items))
|
||||
for _, item := range items {
|
||||
if s, ok := item.(string); ok {
|
||||
result = append(result, s)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func parseFilters(raw interface{}) []common.FilterOption {
|
||||
if raw == nil {
|
||||
return nil
|
||||
}
|
||||
items, ok := raw.([]interface{})
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
result := make([]common.FilterOption, 0, len(items))
|
||||
for _, item := range items {
|
||||
b, err := json.Marshal(item)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var f common.FilterOption
|
||||
if err := json.Unmarshal(b, &f); err != nil {
|
||||
continue
|
||||
}
|
||||
if f.Column == "" || f.Operator == "" {
|
||||
continue
|
||||
}
|
||||
if strings.EqualFold(f.LogicOperator, "or") {
|
||||
f.LogicOperator = "OR"
|
||||
} else {
|
||||
f.LogicOperator = "AND"
|
||||
}
|
||||
result = append(result, f)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func parseSortOptions(raw interface{}) []common.SortOption {
|
||||
if raw == nil {
|
||||
return nil
|
||||
}
|
||||
items, ok := raw.([]interface{})
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
result := make([]common.SortOption, 0, len(items))
|
||||
for _, item := range items {
|
||||
b, err := json.Marshal(item)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var s common.SortOption
|
||||
if err := json.Unmarshal(b, &s); err != nil {
|
||||
continue
|
||||
}
|
||||
if s.Column == "" {
|
||||
continue
|
||||
}
|
||||
result = append(result, s)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func parsePreloadOptions(raw interface{}) []common.PreloadOption {
|
||||
if raw == nil {
|
||||
return nil
|
||||
}
|
||||
items, ok := raw.([]interface{})
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
result := make([]common.PreloadOption, 0, len(items))
|
||||
for _, item := range items {
|
||||
b, err := json.Marshal(item)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var p common.PreloadOption
|
||||
if err := json.Unmarshal(b, &p); err != nil {
|
||||
continue
|
||||
}
|
||||
if p.Relation == "" {
|
||||
continue
|
||||
}
|
||||
result = append(result, p)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// marshalResult marshals a value to JSON and returns it as an MCP text result.
|
||||
func marshalResult(v interface{}) (*mcp.CallToolResult, error) {
|
||||
b, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return mcp.NewToolResultError(fmt.Sprintf("error marshaling result: %v", err)), nil
|
||||
}
|
||||
return mcp.NewToolResultText(string(b)), nil
|
||||
}
|
||||
572
pkg/resolvespec/EXAMPLES.md
Normal file
572
pkg/resolvespec/EXAMPLES.md
Normal file
@@ -0,0 +1,572 @@
|
||||
# ResolveSpec Query Features Examples
|
||||
|
||||
This document provides examples of using the advanced query features in ResolveSpec, including OR logic filters, Custom Operators, and FetchRowNumber.
|
||||
|
||||
## OR Logic in Filters (SearchOr)
|
||||
|
||||
### Basic OR Filter Example
|
||||
|
||||
Find all users with status "active" OR "pending":
|
||||
|
||||
```json
|
||||
POST /users
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"filters": [
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "active"
|
||||
},
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "pending",
|
||||
"logic_operator": "OR"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Combined AND/OR Filters
|
||||
|
||||
Find users with (status="active" OR status="pending") AND age >= 18:
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"filters": [
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "active"
|
||||
},
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "pending",
|
||||
"logic_operator": "OR"
|
||||
},
|
||||
{
|
||||
"column": "age",
|
||||
"operator": "gte",
|
||||
"value": 18
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**SQL Generated:** `WHERE (status = 'active' OR status = 'pending') AND age >= 18`
|
||||
|
||||
**Important Notes:**
|
||||
- By default, filters use AND logic
|
||||
- Consecutive filters with `"logic_operator": "OR"` are automatically grouped with parentheses
|
||||
- This grouping ensures OR conditions don't interfere with AND conditions
|
||||
- You don't need to specify `"logic_operator": "AND"` as it's the default
|
||||
|
||||
### Multiple OR Groups
|
||||
|
||||
You can have multiple separate OR groups:
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"filters": [
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "active"
|
||||
},
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "pending",
|
||||
"logic_operator": "OR"
|
||||
},
|
||||
{
|
||||
"column": "priority",
|
||||
"operator": "eq",
|
||||
"value": "high"
|
||||
},
|
||||
{
|
||||
"column": "priority",
|
||||
"operator": "eq",
|
||||
"value": "urgent",
|
||||
"logic_operator": "OR"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**SQL Generated:** `WHERE (status = 'active' OR status = 'pending') AND (priority = 'high' OR priority = 'urgent')`
|
||||
|
||||
## Custom Operators
|
||||
|
||||
### Simple Custom SQL Condition
|
||||
|
||||
Filter by email domain using custom SQL:
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"customOperators": [
|
||||
{
|
||||
"name": "company_emails",
|
||||
"sql": "email LIKE '%@company.com'"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Multiple Custom Operators
|
||||
|
||||
Combine multiple custom SQL conditions:
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"customOperators": [
|
||||
{
|
||||
"name": "recent_active",
|
||||
"sql": "last_login > NOW() - INTERVAL '30 days'"
|
||||
},
|
||||
{
|
||||
"name": "high_score",
|
||||
"sql": "score > 1000"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Complex Custom Operator
|
||||
|
||||
Use complex SQL expressions:
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"customOperators": [
|
||||
{
|
||||
"name": "priority_users",
|
||||
"sql": "(subscription = 'premium' AND points > 500) OR (subscription = 'enterprise')"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Combining Custom Operators with Regular Filters
|
||||
|
||||
Mix custom operators with standard filters:
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"filters": [
|
||||
{
|
||||
"column": "country",
|
||||
"operator": "eq",
|
||||
"value": "USA"
|
||||
}
|
||||
],
|
||||
"customOperators": [
|
||||
{
|
||||
"name": "active_last_month",
|
||||
"sql": "last_activity > NOW() - INTERVAL '1 month'"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Row Numbers
|
||||
|
||||
### Two Ways to Get Row Numbers
|
||||
|
||||
There are two different features for row numbers:
|
||||
|
||||
1. **`fetch_row_number`** - Get the position of ONE specific record in a sorted/filtered set
|
||||
2. **`RowNumber` field in models** - Automatically number all records in the response
|
||||
|
||||
### 1. FetchRowNumber - Get Position of Specific Record
|
||||
|
||||
Get the rank/position of a specific user in a leaderboard. **Important:** When `fetch_row_number` is specified, the response contains **ONLY that specific record**, not all records.
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"sort": [
|
||||
{
|
||||
"column": "score",
|
||||
"direction": "desc"
|
||||
}
|
||||
],
|
||||
"fetch_row_number": "12345"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Response - Contains ONLY the specified user:**
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"id": 12345,
|
||||
"name": "Alice Smith",
|
||||
"score": 9850,
|
||||
"level": 42
|
||||
},
|
||||
"metadata": {
|
||||
"total": 10000,
|
||||
"count": 1,
|
||||
"filtered": 10000,
|
||||
"row_number": 42
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Result:** User "12345" is ranked #42 out of 10,000 users. The response includes only Alice's data, not the other 9,999 users.
|
||||
|
||||
### Row Number with Filters
|
||||
|
||||
Find position within a filtered subset (e.g., "What's my rank in my country?"):
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"filters": [
|
||||
{
|
||||
"column": "country",
|
||||
"operator": "eq",
|
||||
"value": "USA"
|
||||
},
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "active"
|
||||
}
|
||||
],
|
||||
"sort": [
|
||||
{
|
||||
"column": "score",
|
||||
"direction": "desc"
|
||||
}
|
||||
],
|
||||
"fetch_row_number": "12345"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"id": 12345,
|
||||
"name": "Bob Johnson",
|
||||
"country": "USA",
|
||||
"score": 7200,
|
||||
"status": "active"
|
||||
},
|
||||
"metadata": {
|
||||
"total": 2500,
|
||||
"count": 1,
|
||||
"filtered": 2500,
|
||||
"row_number": 156
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Result:** Bob is ranked #156 out of 2,500 active USA users. Only Bob's record is returned.
|
||||
|
||||
### 2. RowNumber Field - Auto-Number All Records
|
||||
|
||||
If your model has a `RowNumber int64` field, restheadspec will automatically populate it for paginated results.
|
||||
|
||||
**Model Definition:**
|
||||
```go
|
||||
type Player struct {
|
||||
ID int64 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Score int64 `json:"score"`
|
||||
RowNumber int64 `json:"row_number"` // Will be auto-populated
|
||||
}
|
||||
```
|
||||
|
||||
**Request (with pagination):**
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"sort": [{"column": "score", "direction": "desc"}],
|
||||
"limit": 10,
|
||||
"offset": 20
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Response - RowNumber automatically set:**
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": [
|
||||
{
|
||||
"id": 456,
|
||||
"name": "Player21",
|
||||
"score": 8900,
|
||||
"row_number": 21
|
||||
},
|
||||
{
|
||||
"id": 789,
|
||||
"name": "Player22",
|
||||
"score": 8850,
|
||||
"row_number": 22
|
||||
},
|
||||
{
|
||||
"id": 123,
|
||||
"name": "Player23",
|
||||
"score": 8800,
|
||||
"row_number": 23
|
||||
}
|
||||
// ... records 24-30 ...
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**How It Works:**
|
||||
- `row_number = offset + index + 1` (1-based)
|
||||
- With offset=20, first record gets row_number=21
|
||||
- With offset=20, second record gets row_number=22
|
||||
- Perfect for displaying "Rank" in paginated tables
|
||||
|
||||
**Use Case:** Displaying leaderboards with rank numbers:
|
||||
```
|
||||
Rank | Player | Score
|
||||
-----|-----------|-------
|
||||
21 | Player21 | 8900
|
||||
22 | Player22 | 8850
|
||||
23 | Player23 | 8800
|
||||
```
|
||||
|
||||
**Note:** This feature is available in all three packages: resolvespec, restheadspec, and websocketspec.
|
||||
|
||||
### When to Use Each Feature
|
||||
|
||||
| Feature | Use Case | Returns | Performance |
|
||||
|---------|----------|---------|-------------|
|
||||
| `fetch_row_number` | "What's my rank?" | 1 record with position | Fast - 1 record |
|
||||
| `RowNumber` field | "Show top 10 with ranks" | Many records numbered | Fast - simple math |
|
||||
|
||||
**Combined Example - Full Leaderboard UI:**
|
||||
|
||||
```javascript
|
||||
// Request 1: Get current user's rank
|
||||
const userRank = await api.read({
|
||||
fetch_row_number: currentUserId,
|
||||
sort: [{column: "score", direction: "desc"}]
|
||||
});
|
||||
// Returns: {id: 123, name: "You", score: 7500, row_number: 156}
|
||||
|
||||
// Request 2: Get top 10 with rank numbers
|
||||
const top10 = await api.read({
|
||||
sort: [{column: "score", direction: "desc"}],
|
||||
limit: 10,
|
||||
offset: 0
|
||||
});
|
||||
// Returns: [{row_number: 1, ...}, {row_number: 2, ...}, ...]
|
||||
|
||||
// Display:
|
||||
// "Your Rank: #156"
|
||||
// "Top Players:"
|
||||
// "#1 - Alice - 9999"
|
||||
// "#2 - Bob - 9876"
|
||||
// ...
|
||||
```
|
||||
|
||||
## Complete Example: Advanced Query
|
||||
|
||||
Combine all features for a complex query:
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"columns": ["id", "name", "email", "score", "status"],
|
||||
"filters": [
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "active"
|
||||
},
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "trial",
|
||||
"logic_operator": "OR"
|
||||
},
|
||||
{
|
||||
"column": "score",
|
||||
"operator": "gte",
|
||||
"value": 100
|
||||
}
|
||||
],
|
||||
"customOperators": [
|
||||
{
|
||||
"name": "recent_activity",
|
||||
"sql": "last_login > NOW() - INTERVAL '7 days'"
|
||||
},
|
||||
{
|
||||
"name": "verified_email",
|
||||
"sql": "email_verified = true"
|
||||
}
|
||||
],
|
||||
"sort": [
|
||||
{
|
||||
"column": "score",
|
||||
"direction": "desc"
|
||||
},
|
||||
{
|
||||
"column": "created_at",
|
||||
"direction": "asc"
|
||||
}
|
||||
],
|
||||
"fetch_row_number": "12345",
|
||||
"limit": 50,
|
||||
"offset": 0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This query:
|
||||
- Selects specific columns
|
||||
- Filters for users with status "active" OR "trial"
|
||||
- AND score >= 100
|
||||
- Applies custom SQL conditions for recent activity and verified emails
|
||||
- Sorts by score (descending) then creation date (ascending)
|
||||
- Returns the row number of user "12345" in this filtered/sorted set
|
||||
- Returns 50 records starting from the first one
|
||||
|
||||
## Use Cases
|
||||
|
||||
### 1. Leaderboards - Get Current User's Rank
|
||||
|
||||
Get the current user's position and data (returns only their record):
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"filters": [
|
||||
{
|
||||
"column": "game_id",
|
||||
"operator": "eq",
|
||||
"value": "game123"
|
||||
}
|
||||
],
|
||||
"sort": [
|
||||
{
|
||||
"column": "score",
|
||||
"direction": "desc"
|
||||
}
|
||||
],
|
||||
"fetch_row_number": "current_user_id"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Tip:** For full leaderboards, make two requests:
|
||||
1. One with `fetch_row_number` to get user's rank
|
||||
2. One with `limit` and `offset` to get top players list
|
||||
|
||||
### 2. Multi-Status Search
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"filters": [
|
||||
{
|
||||
"column": "order_status",
|
||||
"operator": "eq",
|
||||
"value": "pending"
|
||||
},
|
||||
{
|
||||
"column": "order_status",
|
||||
"operator": "eq",
|
||||
"value": "processing",
|
||||
"logic_operator": "OR"
|
||||
},
|
||||
{
|
||||
"column": "order_status",
|
||||
"operator": "eq",
|
||||
"value": "shipped",
|
||||
"logic_operator": "OR"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Advanced Date Filtering
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"customOperators": [
|
||||
{
|
||||
"name": "this_month",
|
||||
"sql": "created_at >= DATE_TRUNC('month', CURRENT_DATE)"
|
||||
},
|
||||
{
|
||||
"name": "business_hours",
|
||||
"sql": "EXTRACT(HOUR FROM created_at) BETWEEN 9 AND 17"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
**Warning:** Custom operators allow raw SQL, which can be a security risk if not properly handled:
|
||||
|
||||
1. **Never** directly interpolate user input into custom operator SQL
|
||||
2. Always validate and sanitize custom operator SQL on the backend
|
||||
3. Consider using a whitelist of allowed custom operators
|
||||
4. Use prepared statements or parameterized queries when possible
|
||||
5. Implement proper authorization checks before executing queries
|
||||
|
||||
Example of safe custom operator handling in Go:
|
||||
|
||||
```go
|
||||
// Whitelist of allowed custom operators
|
||||
allowedOperators := map[string]string{
|
||||
"recent_week": "created_at > NOW() - INTERVAL '7 days'",
|
||||
"active_users": "status = 'active' AND last_login > NOW() - INTERVAL '30 days'",
|
||||
"premium_only": "subscription_level = 'premium'",
|
||||
}
|
||||
|
||||
// Validate custom operators from request
|
||||
for _, op := range req.Options.CustomOperators {
|
||||
if sql, ok := allowedOperators[op.Name]; ok {
|
||||
op.SQL = sql // Use whitelisted SQL
|
||||
} else {
|
||||
return errors.New("custom operator not allowed: " + op.Name)
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -214,6 +214,146 @@ Content-Type: application/json
|
||||
|
||||
```json
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "active"
|
||||
},
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "pending",
|
||||
"logic_operator": "OR"
|
||||
},
|
||||
{
|
||||
"column": "age",
|
||||
"operator": "gte",
|
||||
"value": 18
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Produces: `WHERE (status = 'active' OR status = 'pending') AND age >= 18`
|
||||
|
||||
This grouping ensures OR conditions don't interfere with other AND conditions in the query.
|
||||
|
||||
### Custom Operators
|
||||
|
||||
Add custom SQL conditions when needed:
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"customOperators": [
|
||||
{
|
||||
"name": "email_domain_filter",
|
||||
"sql": "LOWER(email) LIKE '%@example.com'"
|
||||
},
|
||||
{
|
||||
"name": "recent_records",
|
||||
"sql": "created_at > NOW() - INTERVAL '7 days'"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Custom operators are applied as additional WHERE conditions to your query.
|
||||
|
||||
### Fetch Row Number
|
||||
|
||||
Get the row number (position) of a specific record in the filtered and sorted result set. **When `fetch_row_number` is specified, only that specific record is returned** (not all records).
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"filters": [
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "active"
|
||||
}
|
||||
],
|
||||
"sort": [
|
||||
{
|
||||
"column": "score",
|
||||
"direction": "desc"
|
||||
}
|
||||
],
|
||||
"fetch_row_number": "12345"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Response - Returns ONLY the specified record with its position:**
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"id": 12345,
|
||||
"name": "John Doe",
|
||||
"score": 850,
|
||||
"status": "active"
|
||||
},
|
||||
"metadata": {
|
||||
"total": 1000,
|
||||
"count": 1,
|
||||
"filtered": 1000,
|
||||
"row_number": 42
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Use Case:** Perfect for "Show me this user and their ranking" - you get just that one user with their position in the leaderboard.
|
||||
|
||||
**Note:** This is different from the `RowNumber` field feature, which automatically numbers all records in a paginated response based on offset. That feature uses simple math (`offset + index + 1`), while `fetch_row_number` uses SQL window functions to calculate the actual position in a sorted/filtered set. To use the `RowNumber` field feature, simply add a `RowNumber int64` field to your model - it will be automatically populated with the row position based on pagination.
|
||||
|
||||
## Preloading
|
||||
|
||||
Load related entities with custom configuration:
|
||||
|
||||
```json
|
||||
{
|
||||
"operation": "read",
|
||||
"options": {
|
||||
"columns": ["id", "name", "email"],
|
||||
"preload": [
|
||||
{
|
||||
"relation": "posts",
|
||||
"columns": ["id", "title", "created_at"],
|
||||
"filters": [
|
||||
{
|
||||
"column": "status",
|
||||
"operator": "eq",
|
||||
"value": "published"
|
||||
}
|
||||
],
|
||||
"sort": [
|
||||
{
|
||||
"column": "created_at",
|
||||
"direction": "desc"
|
||||
}
|
||||
],
|
||||
"limit": 5
|
||||
},
|
||||
{
|
||||
"relation": "profile",
|
||||
"columns": ["bio", "website"]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Cursor Pagination
|
||||
|
||||
Efficient pagination for large datasets:
|
||||
|
||||
### First Request (No Cursor)
|
||||
|
||||
```json
|
||||
@@ -427,7 +567,7 @@ Define virtual columns using SQL expressions:
|
||||
// Check permissions
|
||||
if !userHasPermission(ctx.Context, ctx.Entity) {
|
||||
return fmt.Errorf("unauthorized access to %s", ctx.Entity)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Modify query options
|
||||
if ctx.Options.Limit == nil || *ctx.Options.Limit > 100 {
|
||||
@@ -435,17 +575,24 @@ Add custom SQL conditions when needed:
|
||||
}
|
||||
|
||||
return nil
|
||||
users[i].Email = maskEmail(users[i].Email)
|
||||
}
|
||||
})
|
||||
|
||||
// Register an after-read hook (e.g., for data transformation)
|
||||
handler.Hooks().Register(resolvespec.AfterRead, func(ctx *resolvespec.HookContext) error {
|
||||
})
|
||||
// Transform or filter results
|
||||
if users, ok := ctx.Result.([]User); ok {
|
||||
for i := range users {
|
||||
users[i].Email = maskEmail(users[i].Email)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// Register a before-create hook (e.g., for validation)
|
||||
handler.Hooks().Register(resolvespec.BeforeCreate, func(ctx *resolvespec.HookContext) error {
|
||||
// Validate data
|
||||
if user, ok := ctx.Data.(*User); ok {
|
||||
if user.Email == "" {
|
||||
return fmt.Errorf("email is required")
|
||||
}
|
||||
// Add timestamps
|
||||
@@ -497,6 +644,7 @@ handler.Hooks().Register(resolvespec.BeforeCreate, func(ctx *resolvespec.HookCon
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
Tags []Tag `json:"tags,omitempty" gorm:"many2many:post_tags"`
|
||||
}
|
||||
|
||||
// Schema.Table format
|
||||
handler.registry.RegisterModel("core.users", &User{})
|
||||
handler.registry.RegisterModel("core.posts", &Post{})
|
||||
@@ -507,11 +655,13 @@ handler.Hooks().Register(resolvespec.BeforeCreate, func(ctx *resolvespec.HookCon
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/resolvespec"
|
||||
"github.com/gorilla/mux"
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@ const (
|
||||
// - pkName: primary key column (e.g. "id")
|
||||
// - modelColumns: optional list of valid main-table columns (for validation). Pass nil to skip.
|
||||
// - options: the request options containing sort and cursor information
|
||||
// - expandJoins: optional map[alias]string of JOIN clauses for join-column sort support
|
||||
//
|
||||
// Returns SQL snippet to embed in WHERE clause.
|
||||
func GetCursorFilter(
|
||||
@@ -31,8 +32,10 @@ func GetCursorFilter(
|
||||
pkName string,
|
||||
modelColumns []string,
|
||||
options common.RequestOptions,
|
||||
expandJoins map[string]string,
|
||||
) (string, error) {
|
||||
// Remove schema prefix if present
|
||||
// Separate schema prefix from bare table name
|
||||
fullTableName := tableName
|
||||
if strings.Contains(tableName, ".") {
|
||||
tableName = strings.SplitN(tableName, ".", 2)[1]
|
||||
}
|
||||
@@ -57,18 +60,19 @@ func GetCursorFilter(
|
||||
// 3. Prepare
|
||||
// --------------------------------------------------------------------- //
|
||||
var whereClauses []string
|
||||
joinSQL := ""
|
||||
reverse := direction < 0
|
||||
|
||||
// --------------------------------------------------------------------- //
|
||||
// 4. Process each sort column
|
||||
// --------------------------------------------------------------------- //
|
||||
for _, s := range sortItems {
|
||||
col := strings.TrimSpace(s.Column)
|
||||
col := strings.Trim(strings.TrimSpace(s.Column), "()")
|
||||
if col == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse: "created_at", "user.name", etc.
|
||||
// Parse: "created_at", "user.name", "fn.sortorder", etc.
|
||||
parts := strings.Split(col, ".")
|
||||
field := strings.TrimSpace(parts[len(parts)-1])
|
||||
prefix := strings.Join(parts[:len(parts)-1], ".")
|
||||
@@ -81,7 +85,7 @@ func GetCursorFilter(
|
||||
}
|
||||
|
||||
// Resolve column
|
||||
cursorCol, targetCol, err := resolveColumn(
|
||||
cursorCol, targetCol, isJoin, err := resolveColumn(
|
||||
field, prefix, tableName, modelColumns,
|
||||
)
|
||||
if err != nil {
|
||||
@@ -89,6 +93,22 @@ func GetCursorFilter(
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle joins
|
||||
if isJoin {
|
||||
if expandJoins != nil {
|
||||
if joinClause, ok := expandJoins[prefix]; ok {
|
||||
jSQL, cRef := rewriteJoin(joinClause, tableName, prefix)
|
||||
joinSQL = jSQL
|
||||
cursorCol = cRef + "." + field
|
||||
targetCol = prefix + "." + field
|
||||
}
|
||||
}
|
||||
if cursorCol == "" {
|
||||
logger.Warn("Skipping cursor sort column %q: join alias %q not in expandJoins", col, prefix)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Build inequality
|
||||
op := "<"
|
||||
if desc {
|
||||
@@ -112,10 +132,12 @@ func GetCursorFilter(
|
||||
query := fmt.Sprintf(`EXISTS (
|
||||
SELECT 1
|
||||
FROM %s cursor_select
|
||||
%s
|
||||
WHERE cursor_select.%s = %s
|
||||
AND (%s)
|
||||
)`,
|
||||
tableName,
|
||||
fullTableName,
|
||||
joinSQL,
|
||||
pkName,
|
||||
cursorID,
|
||||
orSQL,
|
||||
@@ -136,35 +158,44 @@ func getActiveCursor(options common.RequestOptions) (id string, direction Cursor
|
||||
return "", 0
|
||||
}
|
||||
|
||||
// Helper: resolve column (main table only for now)
|
||||
// Helper: resolve column (main table or join)
|
||||
func resolveColumn(
|
||||
field, prefix, tableName string,
|
||||
modelColumns []string,
|
||||
) (cursorCol, targetCol string, err error) {
|
||||
) (cursorCol, targetCol string, isJoin bool, err error) {
|
||||
|
||||
// JSON field
|
||||
if strings.Contains(field, "->") {
|
||||
return "cursor_select." + field, tableName + "." + field, nil
|
||||
return "cursor_select." + field, tableName + "." + field, false, nil
|
||||
}
|
||||
|
||||
// Main table column
|
||||
if modelColumns != nil {
|
||||
for _, col := range modelColumns {
|
||||
if strings.EqualFold(col, field) {
|
||||
return "cursor_select." + field, tableName + "." + field, nil
|
||||
return "cursor_select." + field, tableName + "." + field, false, nil
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// No validation → allow all main-table fields
|
||||
return "cursor_select." + field, tableName + "." + field, nil
|
||||
return "cursor_select." + field, tableName + "." + field, false, nil
|
||||
}
|
||||
|
||||
// Joined column (not supported in resolvespec yet)
|
||||
// Joined column
|
||||
if prefix != "" && prefix != tableName {
|
||||
return "", "", fmt.Errorf("joined columns not supported in cursor pagination: %s", field)
|
||||
return "", "", true, nil
|
||||
}
|
||||
|
||||
return "", "", fmt.Errorf("invalid column: %s", field)
|
||||
return "", "", false, fmt.Errorf("invalid column: %s", field)
|
||||
}
|
||||
|
||||
// Helper: rewrite JOIN clause for cursor subquery
|
||||
func rewriteJoin(joinClause, mainTable, alias string) (joinSQL, cursorAlias string) {
|
||||
joinSQL = strings.ReplaceAll(joinClause, mainTable+".", "cursor_select.")
|
||||
cursorAlias = "cursor_select_" + alias
|
||||
joinSQL = strings.ReplaceAll(joinSQL, " "+alias+" ", " "+cursorAlias+" ")
|
||||
joinSQL = strings.ReplaceAll(joinSQL, " "+alias+".", " "+cursorAlias+".")
|
||||
return joinSQL, cursorAlias
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------- //
|
||||
|
||||
@@ -20,7 +20,7 @@ func TestGetCursorFilter_Forward(t *testing.T) {
|
||||
pkName := "id"
|
||||
modelColumns := []string{"id", "title", "created_at", "user_id"}
|
||||
|
||||
filter, err := GetCursorFilter(tableName, pkName, modelColumns, options)
|
||||
filter, err := GetCursorFilter(tableName, pkName, modelColumns, options, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("GetCursorFilter failed: %v", err)
|
||||
}
|
||||
@@ -65,7 +65,7 @@ func TestGetCursorFilter_Backward(t *testing.T) {
|
||||
pkName := "id"
|
||||
modelColumns := []string{"id", "title", "created_at", "user_id"}
|
||||
|
||||
filter, err := GetCursorFilter(tableName, pkName, modelColumns, options)
|
||||
filter, err := GetCursorFilter(tableName, pkName, modelColumns, options, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("GetCursorFilter failed: %v", err)
|
||||
}
|
||||
@@ -96,7 +96,7 @@ func TestGetCursorFilter_NoCursor(t *testing.T) {
|
||||
pkName := "id"
|
||||
modelColumns := []string{"id", "title", "created_at"}
|
||||
|
||||
_, err := GetCursorFilter(tableName, pkName, modelColumns, options)
|
||||
_, err := GetCursorFilter(tableName, pkName, modelColumns, options, nil)
|
||||
if err == nil {
|
||||
t.Error("Expected error when no cursor is provided")
|
||||
}
|
||||
@@ -116,7 +116,7 @@ func TestGetCursorFilter_NoSort(t *testing.T) {
|
||||
pkName := "id"
|
||||
modelColumns := []string{"id", "title"}
|
||||
|
||||
_, err := GetCursorFilter(tableName, pkName, modelColumns, options)
|
||||
_, err := GetCursorFilter(tableName, pkName, modelColumns, options, nil)
|
||||
if err == nil {
|
||||
t.Error("Expected error when no sort columns are defined")
|
||||
}
|
||||
@@ -140,7 +140,7 @@ func TestGetCursorFilter_MultiColumnSort(t *testing.T) {
|
||||
pkName := "id"
|
||||
modelColumns := []string{"id", "title", "priority", "created_at"}
|
||||
|
||||
filter, err := GetCursorFilter(tableName, pkName, modelColumns, options)
|
||||
filter, err := GetCursorFilter(tableName, pkName, modelColumns, options, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("GetCursorFilter failed: %v", err)
|
||||
}
|
||||
@@ -170,19 +170,50 @@ func TestGetCursorFilter_WithSchemaPrefix(t *testing.T) {
|
||||
pkName := "id"
|
||||
modelColumns := []string{"id", "name", "email"}
|
||||
|
||||
filter, err := GetCursorFilter(tableName, pkName, modelColumns, options)
|
||||
filter, err := GetCursorFilter(tableName, pkName, modelColumns, options, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("GetCursorFilter failed: %v", err)
|
||||
}
|
||||
|
||||
// Should handle schema prefix properly
|
||||
if !strings.Contains(filter, "users") {
|
||||
t.Errorf("Filter should reference table name users, got: %s", filter)
|
||||
// Should include full schema-qualified name in FROM clause
|
||||
if !strings.Contains(filter, "public.users") {
|
||||
t.Errorf("Filter FROM clause should use schema-qualified name public.users, got: %s", filter)
|
||||
}
|
||||
|
||||
t.Logf("Generated cursor filter with schema: %s", filter)
|
||||
}
|
||||
|
||||
func TestGetCursorFilter_LateralJoin(t *testing.T) {
|
||||
lateralJoin := "inner join lateral (\nselect string_agg(a.name, '.') as sortorder\nfrom tree(account.rid_account) r\ninner join account a on a.id = r.id\n) fn on true"
|
||||
|
||||
options := common.RequestOptions{
|
||||
Sort: []common.SortOption{{Column: "fn.sortorder", Direction: "ASC"}},
|
||||
CursorForward: "8975",
|
||||
}
|
||||
|
||||
tableName := "core.account"
|
||||
pkName := "rid_account"
|
||||
modelColumns := []string{"rid_account", "description", "pastelno"}
|
||||
expandJoins := map[string]string{"fn": lateralJoin}
|
||||
|
||||
filter, err := GetCursorFilter(tableName, pkName, modelColumns, options, expandJoins)
|
||||
if err != nil {
|
||||
t.Fatalf("GetCursorFilter failed: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Generated lateral cursor filter: %s", filter)
|
||||
|
||||
if !strings.Contains(filter, "cursor_select_fn") {
|
||||
t.Errorf("Filter should reference cursor_select_fn alias, got: %s", filter)
|
||||
}
|
||||
if !strings.Contains(filter, "sortorder") {
|
||||
t.Errorf("Filter should reference sortorder column, got: %s", filter)
|
||||
}
|
||||
if strings.Contains(filter, " < ") || strings.Contains(filter, " > ") {
|
||||
t.Errorf("Filter should not contain empty comparison operators, got: %s", filter)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetActiveCursor(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -288,18 +319,19 @@ func TestResolveColumn(t *testing.T) {
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Joined column (not supported)",
|
||||
name: "Joined column (isJoin=true, no error)",
|
||||
field: "name",
|
||||
prefix: "user",
|
||||
tableName: "posts",
|
||||
modelColumns: []string{"id", "title"},
|
||||
wantErr: true,
|
||||
wantErr: false,
|
||||
// cursorCol and targetCol are empty when isJoin=true; handled by caller
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cursor, target, err := resolveColumn(tt.field, tt.prefix, tt.tableName, tt.modelColumns)
|
||||
cursor, target, isJoin, err := resolveColumn(tt.field, tt.prefix, tt.tableName, tt.modelColumns)
|
||||
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
@@ -312,6 +344,14 @@ func TestResolveColumn(t *testing.T) {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// For join columns, cursor/target are empty and isJoin=true
|
||||
if isJoin {
|
||||
if cursor != "" || target != "" {
|
||||
t.Errorf("Expected empty cursor/target for join column, got %q / %q", cursor, target)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if cursor != tt.wantCursor {
|
||||
t.Errorf("Expected cursor %q, got %q", tt.wantCursor, cursor)
|
||||
}
|
||||
@@ -362,7 +402,7 @@ func TestCursorFilter_SQL_Safety(t *testing.T) {
|
||||
pkName := "id"
|
||||
modelColumns := []string{"id", "created_at"}
|
||||
|
||||
filter, err := GetCursorFilter(tableName, pkName, modelColumns, options)
|
||||
filter, err := GetCursorFilter(tableName, pkName, modelColumns, options, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("GetCursorFilter failed: %v", err)
|
||||
}
|
||||
|
||||
143
pkg/resolvespec/filter_test.go
Normal file
143
pkg/resolvespec/filter_test.go
Normal file
@@ -0,0 +1,143 @@
|
||||
package resolvespec
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
)
|
||||
|
||||
// TestBuildFilterCondition tests the filter condition builder
|
||||
func TestBuildFilterCondition(t *testing.T) {
|
||||
h := &Handler{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
filter common.FilterOption
|
||||
expectedCondition string
|
||||
expectedArgsCount int
|
||||
}{
|
||||
{
|
||||
name: "Equal operator",
|
||||
filter: common.FilterOption{
|
||||
Column: "status",
|
||||
Operator: "eq",
|
||||
Value: "active",
|
||||
},
|
||||
expectedCondition: "status = ?",
|
||||
expectedArgsCount: 1,
|
||||
},
|
||||
{
|
||||
name: "Greater than operator",
|
||||
filter: common.FilterOption{
|
||||
Column: "age",
|
||||
Operator: "gt",
|
||||
Value: 18,
|
||||
},
|
||||
expectedCondition: "age > ?",
|
||||
expectedArgsCount: 1,
|
||||
},
|
||||
{
|
||||
name: "IN operator",
|
||||
filter: common.FilterOption{
|
||||
Column: "status",
|
||||
Operator: "in",
|
||||
Value: []string{"active", "pending"},
|
||||
},
|
||||
expectedCondition: "status IN (?,?)",
|
||||
expectedArgsCount: 2,
|
||||
},
|
||||
{
|
||||
name: "LIKE operator",
|
||||
filter: common.FilterOption{
|
||||
Column: "email",
|
||||
Operator: "like",
|
||||
Value: "%@example.com",
|
||||
},
|
||||
expectedCondition: "email LIKE ?",
|
||||
expectedArgsCount: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
condition, args := h.buildFilterCondition(tt.filter)
|
||||
|
||||
if condition != tt.expectedCondition {
|
||||
t.Errorf("Expected condition '%s', got '%s'", tt.expectedCondition, condition)
|
||||
}
|
||||
|
||||
if len(args) != tt.expectedArgsCount {
|
||||
t.Errorf("Expected %d args, got %d", tt.expectedArgsCount, len(args))
|
||||
}
|
||||
|
||||
// Note: Skip value comparison for slices as they can't be compared with ==
|
||||
// The important part is that args are populated correctly
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestORGrouping tests that consecutive OR filters are properly grouped
|
||||
func TestORGrouping(t *testing.T) {
|
||||
// This is a conceptual test - in practice we'd need a mock SelectQuery
|
||||
// to verify the actual SQL grouping behavior
|
||||
t.Run("Consecutive OR filters should be grouped", func(t *testing.T) {
|
||||
filters := []common.FilterOption{
|
||||
{Column: "status", Operator: "eq", Value: "active"},
|
||||
{Column: "status", Operator: "eq", Value: "pending", LogicOperator: "OR"},
|
||||
{Column: "status", Operator: "eq", Value: "trial", LogicOperator: "OR"},
|
||||
{Column: "age", Operator: "gte", Value: 18},
|
||||
}
|
||||
|
||||
// Expected behavior: (status='active' OR status='pending' OR status='trial') AND age>=18
|
||||
// The first three filters should be grouped together
|
||||
// The fourth filter should be separate with AND
|
||||
|
||||
// Count OR groups
|
||||
orGroupCount := 0
|
||||
inORGroup := false
|
||||
|
||||
for i := 1; i < len(filters); i++ {
|
||||
if strings.EqualFold(filters[i].LogicOperator, "OR") && !inORGroup {
|
||||
orGroupCount++
|
||||
inORGroup = true
|
||||
} else if !strings.EqualFold(filters[i].LogicOperator, "OR") {
|
||||
inORGroup = false
|
||||
}
|
||||
}
|
||||
|
||||
// We should have detected one OR group
|
||||
if orGroupCount != 1 {
|
||||
t.Errorf("Expected 1 OR group, detected %d", orGroupCount)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Multiple OR groups should be handled correctly", func(t *testing.T) {
|
||||
filters := []common.FilterOption{
|
||||
{Column: "status", Operator: "eq", Value: "active"},
|
||||
{Column: "status", Operator: "eq", Value: "pending", LogicOperator: "OR"},
|
||||
{Column: "priority", Operator: "eq", Value: "high"},
|
||||
{Column: "priority", Operator: "eq", Value: "urgent", LogicOperator: "OR"},
|
||||
}
|
||||
|
||||
// Expected: (status='active' OR status='pending') AND (priority='high' OR priority='urgent')
|
||||
// Should have two OR groups
|
||||
|
||||
orGroupCount := 0
|
||||
inORGroup := false
|
||||
|
||||
for i := 1; i < len(filters); i++ {
|
||||
if strings.EqualFold(filters[i].LogicOperator, "OR") && !inORGroup {
|
||||
orGroupCount++
|
||||
inORGroup = true
|
||||
} else if !strings.EqualFold(filters[i].LogicOperator, "OR") {
|
||||
inORGroup = false
|
||||
}
|
||||
}
|
||||
|
||||
// We should have detected two OR groups
|
||||
if orGroupCount != 2 {
|
||||
t.Errorf("Expected 2 OR groups, detected %d", orGroupCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -138,6 +138,26 @@ func (h *Handler) Handle(w common.ResponseWriter, r common.Request, params map[s
|
||||
validator := common.NewColumnValidator(model)
|
||||
req.Options = validator.FilterRequestOptions(req.Options)
|
||||
|
||||
// Execute BeforeHandle hook - auth check fires here, after model resolution
|
||||
beforeCtx := &HookContext{
|
||||
Context: ctx,
|
||||
Handler: h,
|
||||
Schema: schema,
|
||||
Entity: entity,
|
||||
Model: model,
|
||||
Writer: w,
|
||||
Request: r,
|
||||
Operation: req.Operation,
|
||||
}
|
||||
if err := h.hooks.Execute(BeforeHandle, beforeCtx); err != nil {
|
||||
code := http.StatusUnauthorized
|
||||
if beforeCtx.AbortCode != 0 {
|
||||
code = beforeCtx.AbortCode
|
||||
}
|
||||
h.sendError(w, code, "unauthorized", beforeCtx.AbortMessage, err)
|
||||
return
|
||||
}
|
||||
|
||||
switch req.Operation {
|
||||
case "read":
|
||||
h.handleRead(ctx, w, id, req.Options)
|
||||
@@ -280,10 +300,13 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
}
|
||||
}
|
||||
|
||||
// Apply filters
|
||||
for _, filter := range options.Filters {
|
||||
logger.Debug("Applying filter: %s %s %v", filter.Column, filter.Operator, filter.Value)
|
||||
query = h.applyFilter(query, filter)
|
||||
// Apply filters with proper grouping for OR logic
|
||||
query = h.applyFilters(query, options.Filters)
|
||||
|
||||
// Apply custom operators
|
||||
for _, customOp := range options.CustomOperators {
|
||||
logger.Debug("Applying custom operator: %s - %s", customOp.Name, customOp.SQL)
|
||||
query = query.Where(customOp.SQL)
|
||||
}
|
||||
|
||||
// Apply sorting
|
||||
@@ -306,8 +329,13 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
// Extract model columns for validation
|
||||
modelColumns := reflection.GetModelColumns(model)
|
||||
|
||||
// Get cursor filter SQL
|
||||
cursorFilter, err := GetCursorFilter(tableName, pkName, modelColumns, options)
|
||||
// Default sort to primary key when none provided
|
||||
if len(options.Sort) == 0 {
|
||||
options.Sort = []common.SortOption{{Column: pkName, Direction: "ASC"}}
|
||||
}
|
||||
|
||||
// Get cursor filter SQL (expandJoins is empty for resolvespec — no custom SQL join support yet)
|
||||
cursorFilter, err := GetCursorFilter(tableName, pkName, modelColumns, options, nil)
|
||||
if err != nil {
|
||||
logger.Error("Error building cursor filter: %v", err)
|
||||
h.sendError(w, http.StatusBadRequest, "cursor_error", "Invalid cursor pagination", err)
|
||||
@@ -381,7 +409,77 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
}
|
||||
}
|
||||
|
||||
// Apply pagination
|
||||
// Handle FetchRowNumber if requested
|
||||
var rowNumber *int64
|
||||
if options.FetchRowNumber != nil && *options.FetchRowNumber != "" {
|
||||
logger.Debug("Fetching row number for ID: %s", *options.FetchRowNumber)
|
||||
pkName := reflection.GetPrimaryKeyName(model)
|
||||
|
||||
// Build ROW_NUMBER window function SQL
|
||||
rowNumberSQL := "ROW_NUMBER() OVER ("
|
||||
if len(options.Sort) > 0 {
|
||||
rowNumberSQL += "ORDER BY "
|
||||
for i, sort := range options.Sort {
|
||||
if i > 0 {
|
||||
rowNumberSQL += ", "
|
||||
}
|
||||
direction := "ASC"
|
||||
if strings.EqualFold(sort.Direction, "desc") {
|
||||
direction = "DESC"
|
||||
}
|
||||
rowNumberSQL += fmt.Sprintf("%s %s", sort.Column, direction)
|
||||
}
|
||||
}
|
||||
rowNumberSQL += ")"
|
||||
|
||||
// Create a query to fetch the row number using a subquery approach
|
||||
// We'll select the PK and row_number, then filter by the target ID
|
||||
type RowNumResult struct {
|
||||
RowNum int64 `bun:"row_num"`
|
||||
}
|
||||
|
||||
rowNumQuery := h.db.NewSelect().Table(tableName).
|
||||
ColumnExpr(fmt.Sprintf("%s AS row_num", rowNumberSQL)).
|
||||
Column(pkName)
|
||||
|
||||
// Apply the same filters as the main query
|
||||
for _, filter := range options.Filters {
|
||||
rowNumQuery = h.applyFilter(rowNumQuery, filter)
|
||||
}
|
||||
|
||||
// Apply custom operators
|
||||
for _, customOp := range options.CustomOperators {
|
||||
rowNumQuery = rowNumQuery.Where(customOp.SQL)
|
||||
}
|
||||
|
||||
// Filter for the specific ID we want the row number for
|
||||
rowNumQuery = rowNumQuery.Where(fmt.Sprintf("%s = ?", common.QuoteIdent(pkName)), *options.FetchRowNumber)
|
||||
|
||||
// Execute query to get row number
|
||||
var result RowNumResult
|
||||
if err := rowNumQuery.Scan(ctx, &result); err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
// Build filter description for error message
|
||||
filterInfo := fmt.Sprintf("filters: %d", len(options.Filters))
|
||||
if len(options.CustomOperators) > 0 {
|
||||
customOps := make([]string, 0, len(options.CustomOperators))
|
||||
for _, op := range options.CustomOperators {
|
||||
customOps = append(customOps, op.SQL)
|
||||
}
|
||||
filterInfo += fmt.Sprintf(", custom operators: [%s]", strings.Join(customOps, "; "))
|
||||
}
|
||||
logger.Warn("No row found for primary key %s=%s with %s", pkName, *options.FetchRowNumber, filterInfo)
|
||||
} else {
|
||||
logger.Warn("Error fetching row number: %v", err)
|
||||
}
|
||||
} else {
|
||||
rowNumber = &result.RowNum
|
||||
logger.Debug("Found row number: %d", *rowNumber)
|
||||
}
|
||||
}
|
||||
|
||||
// Apply pagination (skip if FetchRowNumber is set - we want only that record)
|
||||
if options.FetchRowNumber == nil || *options.FetchRowNumber == "" {
|
||||
if options.Limit != nil && *options.Limit > 0 {
|
||||
logger.Debug("Applying limit: %d", *options.Limit)
|
||||
query = query.Limit(*options.Limit)
|
||||
@@ -390,15 +488,26 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
logger.Debug("Applying offset: %d", *options.Offset)
|
||||
query = query.Offset(*options.Offset)
|
||||
}
|
||||
}
|
||||
|
||||
// Execute query
|
||||
var result interface{}
|
||||
if id != "" || (options.FetchRowNumber != nil && *options.FetchRowNumber != "") {
|
||||
// Single record query - either by URL ID or FetchRowNumber
|
||||
var targetID string
|
||||
if id != "" {
|
||||
logger.Debug("Querying single record with ID: %s", id)
|
||||
targetID = id
|
||||
logger.Debug("Querying single record with URL ID: %s", id)
|
||||
} else {
|
||||
targetID = *options.FetchRowNumber
|
||||
logger.Debug("Querying single record with FetchRowNumber ID: %s", targetID)
|
||||
}
|
||||
|
||||
// For single record, create a new pointer to the struct type
|
||||
singleResult := reflect.New(modelType).Interface()
|
||||
pkName := reflection.GetPrimaryKeyName(singleResult)
|
||||
|
||||
query = query.Where(fmt.Sprintf("%s = ?", common.QuoteIdent(reflection.GetPrimaryKeyName(singleResult))), id)
|
||||
query = query.Where(fmt.Sprintf("%s = ?", common.QuoteIdent(pkName)), targetID)
|
||||
if err := query.Scan(ctx, singleResult); err != nil {
|
||||
logger.Error("Error querying record: %v", err)
|
||||
h.sendError(w, http.StatusInternalServerError, "query_error", "Error executing query", err)
|
||||
@@ -418,20 +527,39 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
|
||||
logger.Info("Successfully retrieved records")
|
||||
|
||||
// Build metadata
|
||||
limit := 0
|
||||
offset := 0
|
||||
count := int64(total)
|
||||
|
||||
// When FetchRowNumber is used, we only return 1 record
|
||||
if options.FetchRowNumber != nil && *options.FetchRowNumber != "" {
|
||||
count = 1
|
||||
// Set the fetched row number on the record
|
||||
if rowNumber != nil {
|
||||
logger.Debug("FetchRowNumber: Setting row number %d on record", *rowNumber)
|
||||
h.setRowNumbersOnRecords(result, int(*rowNumber-1)) // -1 because setRowNumbersOnRecords adds 1
|
||||
}
|
||||
} else {
|
||||
if options.Limit != nil {
|
||||
limit = *options.Limit
|
||||
}
|
||||
offset := 0
|
||||
if options.Offset != nil {
|
||||
offset = *options.Offset
|
||||
}
|
||||
|
||||
// Set row numbers on records if RowNumber field exists
|
||||
// Only for multiple records (not when fetching single record)
|
||||
h.setRowNumbersOnRecords(result, offset)
|
||||
}
|
||||
|
||||
h.sendResponse(w, result, &common.Metadata{
|
||||
Total: int64(total),
|
||||
Filtered: int64(total),
|
||||
Count: count,
|
||||
Limit: limit,
|
||||
Offset: offset,
|
||||
RowNumber: rowNumber,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1133,6 +1261,24 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
|
||||
|
||||
logger.Info("Deleting records from %s.%s", schema, entity)
|
||||
|
||||
// Execute BeforeDelete hooks (covers model-rule checks before any deletion)
|
||||
hookCtx := &HookContext{
|
||||
Context: ctx,
|
||||
Handler: h,
|
||||
Schema: schema,
|
||||
Entity: entity,
|
||||
Model: model,
|
||||
ID: id,
|
||||
Data: data,
|
||||
Writer: w,
|
||||
Tx: h.db,
|
||||
}
|
||||
if err := h.hooks.Execute(BeforeDelete, hookCtx); err != nil {
|
||||
logger.Error("BeforeDelete hook failed: %v", err)
|
||||
h.sendError(w, http.StatusForbidden, "delete_forbidden", "Delete operation not allowed", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle batch delete from request data
|
||||
if data != nil {
|
||||
switch v := data.(type) {
|
||||
@@ -1303,29 +1449,165 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
|
||||
h.sendResponse(w, recordToDelete, nil)
|
||||
}
|
||||
|
||||
func (h *Handler) applyFilter(query common.SelectQuery, filter common.FilterOption) common.SelectQuery {
|
||||
// applyFilters applies all filters with proper grouping for OR logic
|
||||
// Groups consecutive OR filters together to ensure proper query precedence
|
||||
// Example: [A, B(OR), C(OR), D(AND)] => WHERE (A OR B OR C) AND D
|
||||
func (h *Handler) applyFilters(query common.SelectQuery, filters []common.FilterOption) common.SelectQuery {
|
||||
if len(filters) == 0 {
|
||||
return query
|
||||
}
|
||||
|
||||
i := 0
|
||||
for i < len(filters) {
|
||||
// Check if this starts an OR group (current or next filter has OR logic)
|
||||
startORGroup := i+1 < len(filters) && strings.EqualFold(filters[i+1].LogicOperator, "OR")
|
||||
|
||||
if startORGroup {
|
||||
// Collect all consecutive filters that are OR'd together
|
||||
orGroup := []common.FilterOption{filters[i]}
|
||||
j := i + 1
|
||||
for j < len(filters) && strings.EqualFold(filters[j].LogicOperator, "OR") {
|
||||
orGroup = append(orGroup, filters[j])
|
||||
j++
|
||||
}
|
||||
|
||||
// Apply the OR group as a single grouped WHERE clause
|
||||
query = h.applyFilterGroup(query, orGroup)
|
||||
i = j
|
||||
} else {
|
||||
// Single filter with AND logic (or first filter)
|
||||
condition, args := h.buildFilterCondition(filters[i])
|
||||
if condition != "" {
|
||||
query = query.Where(condition, args...)
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
return query
|
||||
}
|
||||
|
||||
// applyFilterGroup applies a group of filters that should be OR'd together
|
||||
// Always wraps them in parentheses and applies as a single WHERE clause
|
||||
func (h *Handler) applyFilterGroup(query common.SelectQuery, filters []common.FilterOption) common.SelectQuery {
|
||||
if len(filters) == 0 {
|
||||
return query
|
||||
}
|
||||
|
||||
// Build all conditions and collect args
|
||||
var conditions []string
|
||||
var args []interface{}
|
||||
|
||||
for _, filter := range filters {
|
||||
condition, filterArgs := h.buildFilterCondition(filter)
|
||||
if condition != "" {
|
||||
conditions = append(conditions, condition)
|
||||
args = append(args, filterArgs...)
|
||||
}
|
||||
}
|
||||
|
||||
if len(conditions) == 0 {
|
||||
return query
|
||||
}
|
||||
|
||||
// Single filter - no need for grouping
|
||||
if len(conditions) == 1 {
|
||||
return query.Where(conditions[0], args...)
|
||||
}
|
||||
|
||||
// Multiple conditions - group with parentheses and OR
|
||||
groupedCondition := "(" + strings.Join(conditions, " OR ") + ")"
|
||||
return query.Where(groupedCondition, args...)
|
||||
}
|
||||
|
||||
// buildFilterCondition builds a filter condition and returns it with args
|
||||
func (h *Handler) buildFilterCondition(filter common.FilterOption) (conditionString string, conditionArgs []interface{}) {
|
||||
var condition string
|
||||
var args []interface{}
|
||||
|
||||
switch filter.Operator {
|
||||
case "eq":
|
||||
return query.Where(fmt.Sprintf("%s = ?", filter.Column), filter.Value)
|
||||
case "neq":
|
||||
return query.Where(fmt.Sprintf("%s != ?", filter.Column), filter.Value)
|
||||
case "gt":
|
||||
return query.Where(fmt.Sprintf("%s > ?", filter.Column), filter.Value)
|
||||
case "gte":
|
||||
return query.Where(fmt.Sprintf("%s >= ?", filter.Column), filter.Value)
|
||||
case "lt":
|
||||
return query.Where(fmt.Sprintf("%s < ?", filter.Column), filter.Value)
|
||||
case "lte":
|
||||
return query.Where(fmt.Sprintf("%s <= ?", filter.Column), filter.Value)
|
||||
case "eq", "=":
|
||||
condition = fmt.Sprintf("%s = ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "neq", "!=", "<>":
|
||||
condition = fmt.Sprintf("%s != ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "gt", ">":
|
||||
condition = fmt.Sprintf("%s > ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "gte", ">=":
|
||||
condition = fmt.Sprintf("%s >= ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "lt", "<":
|
||||
condition = fmt.Sprintf("%s < ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "lte", "<=":
|
||||
condition = fmt.Sprintf("%s <= ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "like":
|
||||
return query.Where(fmt.Sprintf("%s LIKE ?", filter.Column), filter.Value)
|
||||
condition = fmt.Sprintf("%s LIKE ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "ilike":
|
||||
return query.Where(fmt.Sprintf("%s ILIKE ?", filter.Column), filter.Value)
|
||||
condition = fmt.Sprintf("%s ILIKE ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "in":
|
||||
return query.Where(fmt.Sprintf("%s IN (?)", filter.Column), filter.Value)
|
||||
condition, args = common.BuildInCondition(filter.Column, filter.Value)
|
||||
if condition == "" {
|
||||
return "", nil
|
||||
}
|
||||
default:
|
||||
return "", nil
|
||||
}
|
||||
|
||||
return condition, args
|
||||
}
|
||||
|
||||
func (h *Handler) applyFilter(query common.SelectQuery, filter common.FilterOption) common.SelectQuery {
|
||||
// Determine which method to use based on LogicOperator
|
||||
useOrLogic := strings.EqualFold(filter.LogicOperator, "OR")
|
||||
|
||||
var condition string
|
||||
var args []interface{}
|
||||
|
||||
switch filter.Operator {
|
||||
case "eq", "=":
|
||||
condition = fmt.Sprintf("%s = ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "neq", "!=", "<>":
|
||||
condition = fmt.Sprintf("%s != ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "gt", ">":
|
||||
condition = fmt.Sprintf("%s > ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "gte", ">=":
|
||||
condition = fmt.Sprintf("%s >= ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "lt", "<":
|
||||
condition = fmt.Sprintf("%s < ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "lte", "<=":
|
||||
condition = fmt.Sprintf("%s <= ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "like":
|
||||
condition = fmt.Sprintf("%s LIKE ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "ilike":
|
||||
condition = fmt.Sprintf("%s ILIKE ?", filter.Column)
|
||||
args = []interface{}{filter.Value}
|
||||
case "in":
|
||||
condition, args = common.BuildInCondition(filter.Column, filter.Value)
|
||||
if condition == "" {
|
||||
return query
|
||||
}
|
||||
default:
|
||||
return query
|
||||
}
|
||||
|
||||
// Apply filter with appropriate logic operator
|
||||
if useOrLogic {
|
||||
return query.WhereOr(condition, args...)
|
||||
}
|
||||
return query.Where(condition, args...)
|
||||
}
|
||||
|
||||
// parseTableName splits a table name that may contain schema into separate schema and table
|
||||
@@ -1380,10 +1662,16 @@ func (h *Handler) getSchemaAndTable(defaultSchema, entity string, model interfac
|
||||
return schema, entity
|
||||
}
|
||||
|
||||
// getTableName returns the full table name including schema (schema.table)
|
||||
// getTableName returns the full table name including schema.
|
||||
// For most drivers the result is "schema.table". For SQLite, which does not
|
||||
// support schema-qualified names, the schema and table are joined with an
|
||||
// underscore: "schema_table".
|
||||
func (h *Handler) getTableName(schema, entity string, model interface{}) string {
|
||||
schemaName, tableName := h.getSchemaAndTable(schema, entity, model)
|
||||
if schemaName != "" {
|
||||
if h.db.DriverName() == "sqlite" {
|
||||
return fmt.Sprintf("%s_%s", schemaName, tableName)
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", schemaName, tableName)
|
||||
}
|
||||
return tableName
|
||||
@@ -1703,6 +1991,51 @@ func toSnakeCase(s string) string {
|
||||
return strings.ToLower(result.String())
|
||||
}
|
||||
|
||||
// setRowNumbersOnRecords sets the RowNumber field on each record if it exists
|
||||
// The row number is calculated as offset + index + 1 (1-based)
|
||||
func (h *Handler) setRowNumbersOnRecords(records interface{}, offset int) {
|
||||
// Get the reflect value of the records
|
||||
recordsValue := reflect.ValueOf(records)
|
||||
if recordsValue.Kind() == reflect.Ptr {
|
||||
recordsValue = recordsValue.Elem()
|
||||
}
|
||||
|
||||
// Ensure it's a slice
|
||||
if recordsValue.Kind() != reflect.Slice {
|
||||
logger.Debug("setRowNumbersOnRecords: records is not a slice, skipping")
|
||||
return
|
||||
}
|
||||
|
||||
// Iterate through each record
|
||||
for i := 0; i < recordsValue.Len(); i++ {
|
||||
record := recordsValue.Index(i)
|
||||
|
||||
// Dereference if it's a pointer
|
||||
if record.Kind() == reflect.Ptr {
|
||||
if record.IsNil() {
|
||||
continue
|
||||
}
|
||||
record = record.Elem()
|
||||
}
|
||||
|
||||
// Ensure it's a struct
|
||||
if record.Kind() != reflect.Struct {
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to find and set the RowNumber field
|
||||
rowNumberField := record.FieldByName("RowNumber")
|
||||
if rowNumberField.IsValid() && rowNumberField.CanSet() {
|
||||
// Check if the field is of type int64
|
||||
if rowNumberField.Kind() == reflect.Int64 {
|
||||
rowNum := int64(offset + i + 1)
|
||||
rowNumberField.SetInt(rowNum)
|
||||
logger.Debug("Set RowNumber=%d for record index %d", rowNum, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// HandleOpenAPI generates and returns the OpenAPI specification
|
||||
func (h *Handler) HandleOpenAPI(w common.ResponseWriter, r common.Request) {
|
||||
if h.openAPIGenerator == nil {
|
||||
|
||||
@@ -12,6 +12,10 @@ import (
|
||||
type HookType string
|
||||
|
||||
const (
|
||||
// BeforeHandle fires after model resolution, before operation dispatch.
|
||||
// Use this for auth checks that need model rules and user context simultaneously.
|
||||
BeforeHandle HookType = "before_handle"
|
||||
|
||||
// Read operation hooks
|
||||
BeforeRead HookType = "before_read"
|
||||
AfterRead HookType = "after_read"
|
||||
@@ -43,6 +47,9 @@ type HookContext struct {
|
||||
Writer common.ResponseWriter
|
||||
Request common.Request
|
||||
|
||||
// Operation being dispatched (e.g. "read", "create", "update", "delete")
|
||||
Operation string
|
||||
|
||||
// Operation-specific fields
|
||||
ID string
|
||||
Data interface{} // For create/update operations
|
||||
|
||||
@@ -70,17 +70,17 @@ func SetupMuxRoutes(muxRouter *mux.Router, handler *Handler, authMiddleware Midd
|
||||
entityWithIDPath := buildRoutePath(schema, entity) + "/{id}"
|
||||
|
||||
// Create handler functions for this specific entity
|
||||
postEntityHandler := createMuxHandler(handler, schema, entity, "")
|
||||
postEntityWithIDHandler := createMuxHandler(handler, schema, entity, "id")
|
||||
getEntityHandler := createMuxGetHandler(handler, schema, entity, "")
|
||||
var postEntityHandler http.Handler = createMuxHandler(handler, schema, entity, "")
|
||||
var postEntityWithIDHandler http.Handler = createMuxHandler(handler, schema, entity, "id")
|
||||
var getEntityHandler http.Handler = createMuxGetHandler(handler, schema, entity, "")
|
||||
optionsEntityHandler := createMuxOptionsHandler(handler, schema, entity, []string{"GET", "POST", "OPTIONS"})
|
||||
optionsEntityWithIDHandler := createMuxOptionsHandler(handler, schema, entity, []string{"POST", "OPTIONS"})
|
||||
|
||||
// Apply authentication middleware if provided
|
||||
if authMiddleware != nil {
|
||||
postEntityHandler = authMiddleware(postEntityHandler).(http.HandlerFunc)
|
||||
postEntityWithIDHandler = authMiddleware(postEntityWithIDHandler).(http.HandlerFunc)
|
||||
getEntityHandler = authMiddleware(getEntityHandler).(http.HandlerFunc)
|
||||
postEntityHandler = authMiddleware(postEntityHandler)
|
||||
postEntityWithIDHandler = authMiddleware(postEntityWithIDHandler)
|
||||
getEntityHandler = authMiddleware(getEntityHandler)
|
||||
// Don't apply auth middleware to OPTIONS - CORS preflight must not require auth
|
||||
}
|
||||
|
||||
@@ -216,9 +216,34 @@ type BunRouterHandler interface {
|
||||
Handle(method, path string, handler bunrouter.HandlerFunc)
|
||||
}
|
||||
|
||||
// wrapBunRouterHandler wraps a bunrouter handler with auth middleware if provided
|
||||
func wrapBunRouterHandler(handler bunrouter.HandlerFunc, authMiddleware MiddlewareFunc) bunrouter.HandlerFunc {
|
||||
if authMiddleware == nil {
|
||||
return handler
|
||||
}
|
||||
|
||||
return func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
// Create an http.Handler that calls the bunrouter handler
|
||||
httpHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Replace the embedded *http.Request with the middleware-enriched one
|
||||
// so that auth context (user ID, etc.) is visible to the handler.
|
||||
enrichedReq := req
|
||||
enrichedReq.Request = r
|
||||
_ = handler(w, enrichedReq)
|
||||
})
|
||||
|
||||
// Wrap with auth middleware and execute
|
||||
wrappedHandler := authMiddleware(httpHandler)
|
||||
wrappedHandler.ServeHTTP(w, req.Request)
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// SetupBunRouterRoutes sets up bunrouter routes for the ResolveSpec API
|
||||
// Accepts bunrouter.Router or bunrouter.Group
|
||||
func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
// authMiddleware is optional - if provided, routes will be protected with the middleware
|
||||
func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler, authMiddleware MiddlewareFunc) {
|
||||
|
||||
// CORS config
|
||||
corsConfig := common.DefaultCORSConfig()
|
||||
@@ -256,7 +281,7 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
currentEntity := entity
|
||||
|
||||
// POST route without ID
|
||||
r.Handle("POST", entityPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
postEntityHandler := func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewHTTPRequest(req.Request)
|
||||
common.SetCORSHeaders(respAdapter, reqAdapter, corsConfig)
|
||||
@@ -267,10 +292,11 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
|
||||
handler.Handle(respAdapter, reqAdapter, params)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
r.Handle("POST", entityPath, wrapBunRouterHandler(postEntityHandler, authMiddleware))
|
||||
|
||||
// POST route with ID
|
||||
r.Handle("POST", entityWithIDPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
postEntityWithIDHandler := func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewHTTPRequest(req.Request)
|
||||
common.SetCORSHeaders(respAdapter, reqAdapter, corsConfig)
|
||||
@@ -282,10 +308,11 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
|
||||
handler.Handle(respAdapter, reqAdapter, params)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
r.Handle("POST", entityWithIDPath, wrapBunRouterHandler(postEntityWithIDHandler, authMiddleware))
|
||||
|
||||
// GET route without ID
|
||||
r.Handle("GET", entityPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
getEntityHandler := func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewHTTPRequest(req.Request)
|
||||
common.SetCORSHeaders(respAdapter, reqAdapter, corsConfig)
|
||||
@@ -296,10 +323,11 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
|
||||
handler.HandleGet(respAdapter, reqAdapter, params)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
r.Handle("GET", entityPath, wrapBunRouterHandler(getEntityHandler, authMiddleware))
|
||||
|
||||
// GET route with ID
|
||||
r.Handle("GET", entityWithIDPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
getEntityWithIDHandler := func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewHTTPRequest(req.Request)
|
||||
common.SetCORSHeaders(respAdapter, reqAdapter, corsConfig)
|
||||
@@ -311,9 +339,11 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
|
||||
handler.HandleGet(respAdapter, reqAdapter, params)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
r.Handle("GET", entityWithIDPath, wrapBunRouterHandler(getEntityWithIDHandler, authMiddleware))
|
||||
|
||||
// OPTIONS route without ID (returns metadata)
|
||||
// Don't apply auth middleware to OPTIONS - CORS preflight must not require auth
|
||||
r.Handle("OPTIONS", entityPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewHTTPRequest(req.Request)
|
||||
@@ -330,6 +360,7 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
})
|
||||
|
||||
// OPTIONS route with ID (returns metadata)
|
||||
// Don't apply auth middleware to OPTIONS - CORS preflight must not require auth
|
||||
r.Handle("OPTIONS", entityWithIDPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewHTTPRequest(req.Request)
|
||||
@@ -355,8 +386,8 @@ func ExampleWithBunRouter(bunDB *bun.DB) {
|
||||
// Create bunrouter
|
||||
bunRouter := bunrouter.New()
|
||||
|
||||
// Setup ResolveSpec routes with bunrouter
|
||||
SetupBunRouterRoutes(bunRouter, handler)
|
||||
// Setup ResolveSpec routes with bunrouter without authentication
|
||||
SetupBunRouterRoutes(bunRouter, handler, nil)
|
||||
|
||||
// Start server
|
||||
// http.ListenAndServe(":8080", bunRouter)
|
||||
@@ -377,8 +408,8 @@ func ExampleBunRouterWithBunDB(bunDB *bun.DB) {
|
||||
// Create bunrouter
|
||||
bunRouter := bunrouter.New()
|
||||
|
||||
// Setup ResolveSpec routes
|
||||
SetupBunRouterRoutes(bunRouter, handler)
|
||||
// Setup ResolveSpec routes without authentication
|
||||
SetupBunRouterRoutes(bunRouter, handler, nil)
|
||||
|
||||
// This gives you the full uptrace stack: bunrouter + Bun ORM
|
||||
// http.ListenAndServe(":8080", bunRouter)
|
||||
@@ -396,8 +427,87 @@ func ExampleBunRouterWithGroup(bunDB *bun.DB) {
|
||||
apiGroup := bunRouter.NewGroup("/api")
|
||||
|
||||
// Setup ResolveSpec routes on the group - routes will be under /api
|
||||
SetupBunRouterRoutes(apiGroup, handler)
|
||||
SetupBunRouterRoutes(apiGroup, handler, nil)
|
||||
|
||||
// Start server
|
||||
// http.ListenAndServe(":8080", bunRouter)
|
||||
}
|
||||
|
||||
// ExampleWithGORMAndAuth shows how to use ResolveSpec with GORM and authentication
|
||||
func ExampleWithGORMAndAuth(db *gorm.DB) {
|
||||
// Create handler using GORM
|
||||
_ = NewHandlerWithGORM(db)
|
||||
|
||||
// Create auth middleware
|
||||
// import "github.com/bitechdev/ResolveSpec/pkg/security"
|
||||
// secList := security.NewSecurityList(myProvider)
|
||||
// authMiddleware := func(h http.Handler) http.Handler {
|
||||
// return security.NewAuthHandler(secList, h)
|
||||
// }
|
||||
|
||||
// Setup router with authentication
|
||||
_ = mux.NewRouter()
|
||||
// SetupMuxRoutes(muxRouter, handler, authMiddleware)
|
||||
|
||||
// Register models
|
||||
// handler.RegisterModel("public", "users", &User{})
|
||||
|
||||
// Start server
|
||||
// http.ListenAndServe(":8080", muxRouter)
|
||||
}
|
||||
|
||||
// ExampleWithBunAndAuth shows how to use ResolveSpec with Bun and authentication
|
||||
func ExampleWithBunAndAuth(bunDB *bun.DB) {
|
||||
// Create Bun adapter
|
||||
dbAdapter := database.NewBunAdapter(bunDB)
|
||||
|
||||
// Create model registry
|
||||
registry := modelregistry.NewModelRegistry()
|
||||
// registry.RegisterModel("public.users", &User{})
|
||||
|
||||
// Create handler
|
||||
_ = NewHandler(dbAdapter, registry)
|
||||
|
||||
// Create auth middleware
|
||||
// import "github.com/bitechdev/ResolveSpec/pkg/security"
|
||||
// secList := security.NewSecurityList(myProvider)
|
||||
// authMiddleware := func(h http.Handler) http.Handler {
|
||||
// return security.NewAuthHandler(secList, h)
|
||||
// }
|
||||
|
||||
// Setup routes with authentication
|
||||
_ = mux.NewRouter()
|
||||
// SetupMuxRoutes(muxRouter, handler, authMiddleware)
|
||||
|
||||
// Start server
|
||||
// http.ListenAndServe(":8080", muxRouter)
|
||||
}
|
||||
|
||||
// ExampleBunRouterWithBunDBAndAuth shows the full uptrace stack with authentication
|
||||
func ExampleBunRouterWithBunDBAndAuth(bunDB *bun.DB) {
|
||||
// Create Bun database adapter
|
||||
dbAdapter := database.NewBunAdapter(bunDB)
|
||||
|
||||
// Create model registry
|
||||
registry := modelregistry.NewModelRegistry()
|
||||
// registry.RegisterModel("public.users", &User{})
|
||||
|
||||
// Create handler with Bun
|
||||
_ = NewHandler(dbAdapter, registry)
|
||||
|
||||
// Create auth middleware
|
||||
// import "github.com/bitechdev/ResolveSpec/pkg/security"
|
||||
// secList := security.NewSecurityList(myProvider)
|
||||
// authMiddleware := func(h http.Handler) http.Handler {
|
||||
// return security.NewAuthHandler(secList, h)
|
||||
// }
|
||||
|
||||
// Create bunrouter
|
||||
_ = bunrouter.New()
|
||||
|
||||
// Setup ResolveSpec routes with authentication
|
||||
// SetupBunRouterRoutes(bunRouter, handler, authMiddleware)
|
||||
|
||||
// This gives you the full uptrace stack: bunrouter + Bun ORM with authentication
|
||||
// http.ListenAndServe(":8080", bunRouter)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package resolvespec
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
@@ -10,6 +11,17 @@ import (
|
||||
|
||||
// RegisterSecurityHooks registers all security-related hooks with the handler
|
||||
func RegisterSecurityHooks(handler *Handler, securityList *security.SecurityList) {
|
||||
// Hook 0: BeforeHandle - enforce auth after model resolution
|
||||
handler.Hooks().Register(BeforeHandle, func(hookCtx *HookContext) error {
|
||||
if err := security.CheckModelAuthAllowed(newSecurityContext(hookCtx), hookCtx.Operation); err != nil {
|
||||
hookCtx.Abort = true
|
||||
hookCtx.AbortMessage = err.Error()
|
||||
hookCtx.AbortCode = http.StatusUnauthorized
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// Hook 1: BeforeRead - Load security rules
|
||||
handler.Hooks().Register(BeforeRead, func(hookCtx *HookContext) error {
|
||||
secCtx := newSecurityContext(hookCtx)
|
||||
@@ -34,6 +46,18 @@ func RegisterSecurityHooks(handler *Handler, securityList *security.SecurityList
|
||||
return security.LogDataAccess(secCtx)
|
||||
})
|
||||
|
||||
// Hook 5: BeforeUpdate - enforce CanUpdate rule from context/registry
|
||||
handler.Hooks().Register(BeforeUpdate, func(hookCtx *HookContext) error {
|
||||
secCtx := newSecurityContext(hookCtx)
|
||||
return security.CheckModelUpdateAllowed(secCtx)
|
||||
})
|
||||
|
||||
// Hook 6: BeforeDelete - enforce CanDelete rule from context/registry
|
||||
handler.Hooks().Register(BeforeDelete, func(hookCtx *HookContext) error {
|
||||
secCtx := newSecurityContext(hookCtx)
|
||||
return security.CheckModelDeleteAllowed(secCtx)
|
||||
})
|
||||
|
||||
logger.Info("Security hooks registered for resolvespec handler")
|
||||
}
|
||||
|
||||
|
||||
@@ -147,6 +147,7 @@ handler.Hooks.Register(restheadspec.BeforeCreate, func(ctx *restheadspec.HookCon
|
||||
```
|
||||
|
||||
**Available Hook Types**:
|
||||
* `BeforeHandle` — fires after model resolution, before operation dispatch (auth checks)
|
||||
* `BeforeRead`, `AfterRead`
|
||||
* `BeforeCreate`, `AfterCreate`
|
||||
* `BeforeUpdate`, `AfterUpdate`
|
||||
@@ -157,11 +158,13 @@ handler.Hooks.Register(restheadspec.BeforeCreate, func(ctx *restheadspec.HookCon
|
||||
* `Handler`: Access to handler, database, and registry
|
||||
* `Schema`, `Entity`, `TableName`: Request info
|
||||
* `Model`: The registered model type
|
||||
* `Operation`: Current operation string (`"read"`, `"create"`, `"update"`, `"delete"`)
|
||||
* `Options`: Parsed request options (filters, sorting, etc.)
|
||||
* `ID`: Record ID (for single-record operations)
|
||||
* `Data`: Request data (for create/update)
|
||||
* `Result`: Operation result (for after hooks)
|
||||
* `Writer`: Response writer (allows hooks to modify response)
|
||||
* `Abort`, `AbortMessage`, `AbortCode`: Set in hook to abort with an error response
|
||||
|
||||
## Cursor Pagination
|
||||
|
||||
|
||||
@@ -32,6 +32,8 @@ func (opts *ExtendedRequestOptions) GetCursorFilter(
|
||||
modelColumns []string, // optional: for validation
|
||||
expandJoins map[string]string, // optional: alias → JOIN SQL
|
||||
) (string, error) {
|
||||
// Separate schema prefix from bare table name
|
||||
fullTableName := tableName
|
||||
if strings.Contains(tableName, ".") {
|
||||
tableName = strings.SplitN(tableName, ".", 2)[1]
|
||||
}
|
||||
@@ -62,7 +64,7 @@ func (opts *ExtendedRequestOptions) GetCursorFilter(
|
||||
// 4. Process each sort column
|
||||
// --------------------------------------------------------------------- //
|
||||
for _, s := range sortItems {
|
||||
col := strings.TrimSpace(s.Column)
|
||||
col := strings.Trim(strings.TrimSpace(s.Column), "()")
|
||||
if col == "" {
|
||||
continue
|
||||
}
|
||||
@@ -91,7 +93,8 @@ func (opts *ExtendedRequestOptions) GetCursorFilter(
|
||||
}
|
||||
|
||||
// Handle joins
|
||||
if isJoin && expandJoins != nil {
|
||||
if isJoin {
|
||||
if expandJoins != nil {
|
||||
if joinClause, ok := expandJoins[prefix]; ok {
|
||||
jSQL, cRef := rewriteJoin(joinClause, tableName, prefix)
|
||||
joinSQL = jSQL
|
||||
@@ -99,6 +102,11 @@ func (opts *ExtendedRequestOptions) GetCursorFilter(
|
||||
targetCol = prefix + "." + field
|
||||
}
|
||||
}
|
||||
if cursorCol == "" {
|
||||
logger.Warn("Skipping cursor sort column %q: join alias %q not in expandJoins", col, prefix)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Build inequality
|
||||
op := "<"
|
||||
@@ -127,7 +135,7 @@ func (opts *ExtendedRequestOptions) GetCursorFilter(
|
||||
WHERE cursor_select.%s = %s
|
||||
AND (%s)
|
||||
)`,
|
||||
tableName,
|
||||
fullTableName,
|
||||
joinSQL,
|
||||
pkName,
|
||||
cursorID,
|
||||
|
||||
@@ -187,9 +187,9 @@ func TestGetCursorFilter_WithSchemaPrefix(t *testing.T) {
|
||||
t.Fatalf("GetCursorFilter failed: %v", err)
|
||||
}
|
||||
|
||||
// Should handle schema prefix properly
|
||||
if !strings.Contains(filter, "users") {
|
||||
t.Errorf("Filter should reference table name users, got: %s", filter)
|
||||
// Should include full schema-qualified name in FROM clause
|
||||
if !strings.Contains(filter, "public.users") {
|
||||
t.Errorf("Filter FROM clause should use schema-qualified name public.users, got: %s", filter)
|
||||
}
|
||||
|
||||
t.Logf("Generated cursor filter with schema: %s", filter)
|
||||
@@ -278,6 +278,47 @@ func TestCleanSortField(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCursorFilter_LateralJoin(t *testing.T) {
|
||||
lateralJoin := "inner join lateral (\nselect string_agg(a.name, '.') as sortorder\nfrom tree(account.rid_account) r\ninner join account a on a.id = r.id\n) fn on true"
|
||||
|
||||
opts := &ExtendedRequestOptions{
|
||||
RequestOptions: common.RequestOptions{
|
||||
Sort: []common.SortOption{
|
||||
{Column: "fn.sortorder", Direction: "ASC"},
|
||||
},
|
||||
},
|
||||
}
|
||||
opts.CursorForward = "8975"
|
||||
|
||||
tableName := "core.account"
|
||||
pkName := "rid_account"
|
||||
// modelColumns does not contain "sortorder" - it's a lateral join computed column
|
||||
modelColumns := []string{"rid_account", "description", "pastelno"}
|
||||
expandJoins := map[string]string{"fn": lateralJoin}
|
||||
|
||||
filter, err := opts.GetCursorFilter(tableName, pkName, modelColumns, expandJoins)
|
||||
if err != nil {
|
||||
t.Fatalf("GetCursorFilter failed: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Generated lateral cursor filter: %s", filter)
|
||||
|
||||
// Should contain the rewritten lateral join inside the EXISTS subquery
|
||||
if !strings.Contains(filter, "cursor_select_fn") {
|
||||
t.Errorf("Filter should reference cursor_select_fn alias, got: %s", filter)
|
||||
}
|
||||
|
||||
// Should compare fn.sortorder values
|
||||
if !strings.Contains(filter, "sortorder") {
|
||||
t.Errorf("Filter should reference sortorder column, got: %s", filter)
|
||||
}
|
||||
|
||||
// Should NOT contain empty comparison like "< "
|
||||
if strings.Contains(filter, " < ") || strings.Contains(filter, " > ") {
|
||||
t.Errorf("Filter should not contain empty comparison operators, got: %s", filter)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildPriorityChain(t *testing.T) {
|
||||
clauses := []string{
|
||||
"cursor_select.priority > posts.priority",
|
||||
|
||||
@@ -133,6 +133,41 @@ func (h *Handler) Handle(w common.ResponseWriter, r common.Request, params map[s
|
||||
// Add request-scoped data to context (including options)
|
||||
ctx = WithRequestData(ctx, schema, entity, tableName, model, modelPtr, options)
|
||||
|
||||
// Derive operation for auth check
|
||||
var operation string
|
||||
switch method {
|
||||
case "GET":
|
||||
operation = "read"
|
||||
case "POST":
|
||||
operation = "create"
|
||||
case "PUT", "PATCH":
|
||||
operation = "update"
|
||||
case "DELETE":
|
||||
operation = "delete"
|
||||
default:
|
||||
operation = "read"
|
||||
}
|
||||
|
||||
// Execute BeforeHandle hook - auth check fires here, after model resolution
|
||||
beforeCtx := &HookContext{
|
||||
Context: ctx,
|
||||
Handler: h,
|
||||
Schema: schema,
|
||||
Entity: entity,
|
||||
Model: model,
|
||||
Writer: w,
|
||||
Request: r,
|
||||
Operation: operation,
|
||||
}
|
||||
if err := h.hooks.Execute(BeforeHandle, beforeCtx); err != nil {
|
||||
code := http.StatusUnauthorized
|
||||
if beforeCtx.AbortCode != 0 {
|
||||
code = beforeCtx.AbortCode
|
||||
}
|
||||
h.sendError(w, code, "unauthorized", beforeCtx.AbortMessage, err)
|
||||
return
|
||||
}
|
||||
|
||||
switch method {
|
||||
case "GET":
|
||||
if id != "" {
|
||||
@@ -435,9 +470,11 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
}
|
||||
|
||||
// Apply preloading
|
||||
logger.Debug("Total preloads to apply: %d", len(options.Preload))
|
||||
for idx := range options.Preload {
|
||||
preload := options.Preload[idx]
|
||||
logger.Debug("Applying preload: %s", preload.Relation)
|
||||
logger.Debug("Applying preload [%d]: Relation=%s, Recursive=%v, RelatedKey=%s, Where=%s",
|
||||
idx, preload.Relation, preload.Recursive, preload.RelatedKey, preload.Where)
|
||||
|
||||
// Validate and fix WHERE clause to ensure it contains the relation prefix
|
||||
if len(preload.Where) > 0 {
|
||||
@@ -547,8 +584,30 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
}
|
||||
}
|
||||
|
||||
// If ID is provided, filter by ID
|
||||
if id != "" {
|
||||
// Handle FetchRowNumber before applying ID filter
|
||||
// This must happen before the query to get the row position, then filter by PK
|
||||
var fetchedRowNumber *int64
|
||||
var fetchRowNumberPKValue string
|
||||
if options.FetchRowNumber != nil && *options.FetchRowNumber != "" {
|
||||
pkName := reflection.GetPrimaryKeyName(model)
|
||||
fetchRowNumberPKValue = *options.FetchRowNumber
|
||||
|
||||
logger.Debug("FetchRowNumber: Fetching row number for PK %s = %s", pkName, fetchRowNumberPKValue)
|
||||
|
||||
rowNum, err := h.FetchRowNumber(ctx, tableName, pkName, fetchRowNumberPKValue, options, model)
|
||||
if err != nil {
|
||||
logger.Error("Failed to fetch row number: %v", err)
|
||||
h.sendError(w, http.StatusBadRequest, "fetch_rownumber_error", "Failed to fetch row number", err)
|
||||
return
|
||||
}
|
||||
|
||||
fetchedRowNumber = &rowNum
|
||||
logger.Debug("FetchRowNumber: Row number %d for PK %s = %s", rowNum, pkName, fetchRowNumberPKValue)
|
||||
|
||||
// Now filter the main query to this specific primary key
|
||||
query = query.Where(fmt.Sprintf("%s = ?", common.QuoteIdent(pkName)), fetchRowNumberPKValue)
|
||||
} else if id != "" {
|
||||
// If ID is provided (and not FetchRowNumber), filter by ID
|
||||
pkName := reflection.GetPrimaryKeyName(model)
|
||||
logger.Debug("Filtering by ID=%s: %s", pkName, id)
|
||||
|
||||
@@ -664,12 +723,19 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
// Extract model columns for validation using the generic database function
|
||||
modelColumns := reflection.GetModelColumns(model)
|
||||
|
||||
// Build expand joins map (if needed in future)
|
||||
var expandJoins map[string]string
|
||||
if len(options.Expand) > 0 {
|
||||
expandJoins = make(map[string]string)
|
||||
// TODO: Build actual JOIN SQL for each expand relation
|
||||
// For now, pass empty map as joins are handled via Preload
|
||||
// Build expand joins map: custom SQL joins are available in cursor subquery
|
||||
expandJoins := make(map[string]string)
|
||||
for _, joinClause := range options.CustomSQLJoin {
|
||||
alias := extractJoinAlias(joinClause)
|
||||
if alias != "" {
|
||||
expandJoins[alias] = joinClause
|
||||
}
|
||||
}
|
||||
// TODO: also add Expand relation JOINs when those are built as SQL rather than Preload
|
||||
|
||||
// Default sort to primary key when none provided
|
||||
if len(options.Sort) == 0 {
|
||||
options.Sort = []common.SortOption{{Column: pkName, Direction: "ASC"}}
|
||||
}
|
||||
|
||||
// Get cursor filter SQL
|
||||
@@ -728,7 +794,14 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
}
|
||||
|
||||
// Set row numbers on each record if the model has a RowNumber field
|
||||
// If FetchRowNumber was used, set the fetched row number instead of offset-based
|
||||
if fetchedRowNumber != nil {
|
||||
// FetchRowNumber: set the actual row position on the record
|
||||
logger.Debug("FetchRowNumber: Setting row number %d on record", *fetchedRowNumber)
|
||||
h.setRowNumbersOnRecords(modelPtr, int(*fetchedRowNumber-1)) // -1 because setRowNumbersOnRecords adds 1
|
||||
} else {
|
||||
h.setRowNumbersOnRecords(modelPtr, offset)
|
||||
}
|
||||
|
||||
metadata := &common.Metadata{
|
||||
Total: int64(total),
|
||||
@@ -738,21 +811,10 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
Offset: offset,
|
||||
}
|
||||
|
||||
// Fetch row number for a specific record if requested
|
||||
if options.FetchRowNumber != nil && *options.FetchRowNumber != "" {
|
||||
pkName := reflection.GetPrimaryKeyName(model)
|
||||
pkValue := *options.FetchRowNumber
|
||||
|
||||
logger.Debug("Fetching row number for specific PK %s = %s", pkName, pkValue)
|
||||
|
||||
rowNum, err := h.FetchRowNumber(ctx, tableName, pkName, pkValue, options, model)
|
||||
if err != nil {
|
||||
logger.Warn("Failed to fetch row number: %v", err)
|
||||
// Don't fail the entire request, just log the warning
|
||||
} else {
|
||||
metadata.RowNumber = &rowNum
|
||||
logger.Debug("Row number for PK %s: %d", pkValue, rowNum)
|
||||
}
|
||||
// If FetchRowNumber was used, also set it in metadata
|
||||
if fetchedRowNumber != nil {
|
||||
metadata.RowNumber = fetchedRowNumber
|
||||
logger.Debug("FetchRowNumber: Row number %d set in metadata", *fetchedRowNumber)
|
||||
}
|
||||
|
||||
// Execute AfterRead hooks
|
||||
@@ -882,6 +944,15 @@ func (h *Handler) applyPreloadWithRecursion(query common.SelectQuery, preload co
|
||||
}
|
||||
}
|
||||
|
||||
// Apply custom SQL joins from XFiles
|
||||
if len(preload.SqlJoins) > 0 {
|
||||
logger.Debug("Applying %d SQL joins to preload %s", len(preload.SqlJoins), preload.Relation)
|
||||
for _, joinClause := range preload.SqlJoins {
|
||||
sq = sq.Join(joinClause)
|
||||
logger.Debug("Applied SQL join to preload %s: %s", preload.Relation, joinClause)
|
||||
}
|
||||
}
|
||||
|
||||
// Apply filters
|
||||
if len(preload.Filters) > 0 {
|
||||
for _, filter := range preload.Filters {
|
||||
@@ -907,10 +978,25 @@ func (h *Handler) applyPreloadWithRecursion(query common.SelectQuery, preload co
|
||||
if len(preload.Where) > 0 {
|
||||
// Build RequestOptions with all preloads to allow references to sibling relations
|
||||
preloadOpts := &common.RequestOptions{Preload: allPreloads}
|
||||
// First add table prefixes to unqualified columns
|
||||
prefixedWhere := common.AddTablePrefixToColumns(preload.Where, reflection.ExtractTableNameOnly(preload.Relation))
|
||||
// Then sanitize and allow preload table prefixes
|
||||
sanitizedWhere := common.SanitizeWhereClause(prefixedWhere, reflection.ExtractTableNameOnly(preload.Relation), preloadOpts)
|
||||
|
||||
// Determine the table name to use for WHERE clause processing
|
||||
// Prefer the explicit TableName field (set by XFiles), otherwise extract from relation name
|
||||
tableName := preload.TableName
|
||||
if tableName == "" {
|
||||
tableName = reflection.ExtractTableNameOnly(preload.Relation)
|
||||
}
|
||||
|
||||
// In Bun's Relation context, table prefixes are only needed when there are JOINs
|
||||
// Without JOINs, Bun already knows which table is being queried
|
||||
whereClause := preload.Where
|
||||
if len(preload.SqlJoins) > 0 {
|
||||
// Has JOINs: add table prefixes to disambiguate columns
|
||||
whereClause = common.AddTablePrefixToColumns(preload.Where, tableName)
|
||||
logger.Debug("Added table prefix for preload with joins: '%s' -> '%s'", preload.Where, whereClause)
|
||||
}
|
||||
|
||||
// Sanitize the WHERE clause and allow preload table prefixes
|
||||
sanitizedWhere := common.SanitizeWhereClause(whereClause, tableName, preloadOpts)
|
||||
if len(sanitizedWhere) > 0 {
|
||||
sq = sq.Where(sanitizedWhere)
|
||||
}
|
||||
@@ -929,21 +1015,82 @@ func (h *Handler) applyPreloadWithRecursion(query common.SelectQuery, preload co
|
||||
})
|
||||
|
||||
// Handle recursive preloading
|
||||
if preload.Recursive && depth < 4 {
|
||||
if preload.Recursive && depth < 8 {
|
||||
logger.Debug("Applying recursive preload for %s at depth %d", preload.Relation, depth+1)
|
||||
|
||||
// For recursive relationships, we need to get the last part of the relation path
|
||||
// e.g., "MastertaskItems" -> "MastertaskItems.MastertaskItems"
|
||||
relationParts := strings.Split(preload.Relation, ".")
|
||||
lastRelationName := relationParts[len(relationParts)-1]
|
||||
|
||||
// Create a recursive preload with the same configuration
|
||||
// but with the relation path extended
|
||||
recursivePreload := preload
|
||||
recursivePreload.Relation = preload.Relation + "." + lastRelationName
|
||||
// Generate FK-based relation name for children
|
||||
// Use RecursiveChildKey if available, otherwise fall back to RelatedKey
|
||||
recursiveFK := preload.RecursiveChildKey
|
||||
if recursiveFK == "" {
|
||||
recursiveFK = preload.RelatedKey
|
||||
}
|
||||
|
||||
// Recursively apply preload until we reach depth 5
|
||||
recursiveRelationName := lastRelationName
|
||||
if recursiveFK != "" {
|
||||
// Check if the last relation name already contains the FK suffix
|
||||
// (this happens when XFiles already generated the FK-based name)
|
||||
fkUpper := strings.ToUpper(recursiveFK)
|
||||
expectedSuffix := "_" + fkUpper
|
||||
|
||||
if strings.HasSuffix(lastRelationName, expectedSuffix) {
|
||||
// Already has FK suffix, just reuse the same name
|
||||
recursiveRelationName = lastRelationName
|
||||
logger.Debug("Reusing FK-based relation name for recursion: %s", recursiveRelationName)
|
||||
} else {
|
||||
// Generate FK-based name
|
||||
recursiveRelationName = lastRelationName + expectedSuffix
|
||||
keySource := "RelatedKey"
|
||||
if preload.RecursiveChildKey != "" {
|
||||
keySource = "RecursiveChildKey"
|
||||
}
|
||||
logger.Debug("Generated recursive relation name from %s: %s (from %s)",
|
||||
keySource, recursiveRelationName, recursiveFK)
|
||||
}
|
||||
} else {
|
||||
logger.Warn("Recursive preload for %s has no RecursiveChildKey or RelatedKey, falling back to %s.%s",
|
||||
preload.Relation, preload.Relation, lastRelationName)
|
||||
}
|
||||
|
||||
// Create recursive preload
|
||||
recursivePreload := preload
|
||||
recursivePreload.Relation = preload.Relation + "." + recursiveRelationName
|
||||
recursivePreload.Recursive = false // Prevent infinite recursion at this level
|
||||
|
||||
// Use the recursive FK for child relations, not the parent's RelatedKey
|
||||
if preload.RecursiveChildKey != "" {
|
||||
recursivePreload.RelatedKey = preload.RecursiveChildKey
|
||||
}
|
||||
|
||||
// CRITICAL: Clear parent's WHERE clause - let Bun use FK traversal
|
||||
recursivePreload.Where = ""
|
||||
recursivePreload.Filters = []common.FilterOption{}
|
||||
logger.Debug("Cleared WHERE clause for recursive preload %s at depth %d",
|
||||
recursivePreload.Relation, depth+1)
|
||||
|
||||
// Apply recursively up to depth 8
|
||||
query = h.applyPreloadWithRecursion(query, recursivePreload, allPreloads, model, depth+1)
|
||||
|
||||
// ALSO: Extend any child relations (like DEF) to recursive levels
|
||||
baseRelation := preload.Relation + "."
|
||||
for i := range allPreloads {
|
||||
relatedPreload := allPreloads[i]
|
||||
if strings.HasPrefix(relatedPreload.Relation, baseRelation) &&
|
||||
!strings.Contains(strings.TrimPrefix(relatedPreload.Relation, baseRelation), ".") {
|
||||
childRelationName := strings.TrimPrefix(relatedPreload.Relation, baseRelation)
|
||||
|
||||
extendedChildPreload := relatedPreload
|
||||
extendedChildPreload.Relation = recursivePreload.Relation + "." + childRelationName
|
||||
extendedChildPreload.Recursive = false
|
||||
|
||||
logger.Debug("Extending related preload '%s' to '%s' at recursive depth %d",
|
||||
relatedPreload.Relation, extendedChildPreload.Relation, depth+1)
|
||||
|
||||
query = h.applyPreloadWithRecursion(query, extendedChildPreload, allPreloads, model, depth+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return query
|
||||
@@ -1393,8 +1540,8 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
|
||||
}
|
||||
|
||||
if err := h.hooks.Execute(BeforeDelete, hookCtx); err != nil {
|
||||
logger.Warn("BeforeDelete hook failed for ID %s: %v", itemID, err)
|
||||
continue
|
||||
logger.Error("BeforeDelete hook failed for ID %s: %v", itemID, err)
|
||||
return fmt.Errorf("delete not allowed for ID %s: %w", itemID, err)
|
||||
}
|
||||
|
||||
query := tx.NewDelete().Table(tableName).Where(fmt.Sprintf("%s = ?", common.QuoteIdent(reflection.GetPrimaryKeyName(model))), itemID)
|
||||
@@ -1467,8 +1614,8 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
|
||||
}
|
||||
|
||||
if err := h.hooks.Execute(BeforeDelete, hookCtx); err != nil {
|
||||
logger.Warn("BeforeDelete hook failed for ID %v: %v", itemID, err)
|
||||
continue
|
||||
logger.Error("BeforeDelete hook failed for ID %v: %v", itemID, err)
|
||||
return fmt.Errorf("delete not allowed for ID %v: %w", itemID, err)
|
||||
}
|
||||
|
||||
query := tx.NewDelete().Table(tableName).Where(fmt.Sprintf("%s = ?", common.QuoteIdent(reflection.GetPrimaryKeyName(model))), itemID)
|
||||
@@ -1525,8 +1672,8 @@ func (h *Handler) handleDelete(ctx context.Context, w common.ResponseWriter, id
|
||||
}
|
||||
|
||||
if err := h.hooks.Execute(BeforeDelete, hookCtx); err != nil {
|
||||
logger.Warn("BeforeDelete hook failed for ID %v: %v", itemID, err)
|
||||
continue
|
||||
logger.Error("BeforeDelete hook failed for ID %v: %v", itemID, err)
|
||||
return fmt.Errorf("delete not allowed for ID %v: %w", itemID, err)
|
||||
}
|
||||
|
||||
query := tx.NewDelete().Table(tableName).Where(fmt.Sprintf("%s = ?", common.QuoteIdent(reflection.GetPrimaryKeyName(model))), itemID)
|
||||
@@ -1928,11 +2075,18 @@ func (h *Handler) processChildRelationsForField(
|
||||
return nil
|
||||
}
|
||||
|
||||
// getTableNameForRelatedModel gets the table name for a related model
|
||||
// getTableNameForRelatedModel gets the table name for a related model.
|
||||
// If the model's TableName() is schema-qualified (e.g. "public.users") the
|
||||
// separator is adjusted for the active driver: underscore for SQLite, dot otherwise.
|
||||
func (h *Handler) getTableNameForRelatedModel(model interface{}, defaultName string) string {
|
||||
if provider, ok := model.(common.TableNameProvider); ok {
|
||||
tableName := provider.TableName()
|
||||
if tableName != "" {
|
||||
if schema, table := h.parseTableName(tableName); schema != "" {
|
||||
if h.db.DriverName() == "sqlite" {
|
||||
return fmt.Sprintf("%s_%s", schema, table)
|
||||
}
|
||||
}
|
||||
return tableName
|
||||
}
|
||||
}
|
||||
@@ -1999,7 +2153,11 @@ func (h *Handler) applyFilter(query common.SelectQuery, filter common.FilterOpti
|
||||
// Column is already cast to TEXT if needed
|
||||
return applyWhere(fmt.Sprintf("%s ILIKE ?", qualifiedColumn), filter.Value)
|
||||
case "in":
|
||||
return applyWhere(fmt.Sprintf("%s IN (?)", qualifiedColumn), filter.Value)
|
||||
cond, inArgs := common.BuildInCondition(qualifiedColumn, filter.Value)
|
||||
if cond == "" {
|
||||
return query
|
||||
}
|
||||
return applyWhere(cond, inArgs...)
|
||||
case "between":
|
||||
// Handle between operator - exclusive (> val1 AND < val2)
|
||||
if values, ok := filter.Value.([]interface{}); ok && len(values) == 2 {
|
||||
@@ -2075,24 +2233,25 @@ func (h *Handler) applyOrFilterGroup(query common.SelectQuery, filters []*common
|
||||
// buildFilterCondition builds a single filter condition and returns the condition string and args
|
||||
func (h *Handler) buildFilterCondition(qualifiedColumn string, filter *common.FilterOption, tableName string) (filterStr string, filterInterface []interface{}) {
|
||||
switch strings.ToLower(filter.Operator) {
|
||||
case "eq", "equals":
|
||||
case "eq", "equals", "=":
|
||||
return fmt.Sprintf("%s = ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "neq", "not_equals", "ne":
|
||||
case "neq", "not_equals", "ne", "!=", "<>":
|
||||
return fmt.Sprintf("%s != ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "gt", "greater_than":
|
||||
case "gt", "greater_than", ">":
|
||||
return fmt.Sprintf("%s > ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "gte", "greater_than_equals", "ge":
|
||||
case "gte", "greater_than_equals", "ge", ">=":
|
||||
return fmt.Sprintf("%s >= ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "lt", "less_than":
|
||||
case "lt", "less_than", "<":
|
||||
return fmt.Sprintf("%s < ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "lte", "less_than_equals", "le":
|
||||
case "lte", "less_than_equals", "le", "<=":
|
||||
return fmt.Sprintf("%s <= ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "like":
|
||||
return fmt.Sprintf("%s LIKE ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "ilike":
|
||||
return fmt.Sprintf("%s ILIKE ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "in":
|
||||
return fmt.Sprintf("%s IN (?)", qualifiedColumn), []interface{}{filter.Value}
|
||||
cond, inArgs := common.BuildInCondition(qualifiedColumn, filter.Value)
|
||||
return cond, inArgs
|
||||
case "between":
|
||||
// Handle between operator - exclusive (> val1 AND < val2)
|
||||
if values, ok := filter.Value.([]interface{}); ok && len(values) == 2 {
|
||||
@@ -2177,10 +2336,16 @@ func (h *Handler) getSchemaAndTable(defaultSchema, entity string, model interfac
|
||||
return schema, entity
|
||||
}
|
||||
|
||||
// getTableName returns the full table name including schema (schema.table)
|
||||
// getTableName returns the full table name including schema.
|
||||
// For most drivers the result is "schema.table". For SQLite, which does not
|
||||
// support schema-qualified names, the schema and table are joined with an
|
||||
// underscore: "schema_table".
|
||||
func (h *Handler) getTableName(schema, entity string, model interface{}) string {
|
||||
schemaName, tableName := h.getSchemaAndTable(schema, entity, model)
|
||||
if schemaName != "" {
|
||||
if h.db.DriverName() == "sqlite" {
|
||||
return fmt.Sprintf("%s_%s", schemaName, tableName)
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", schemaName, tableName)
|
||||
}
|
||||
return tableName
|
||||
@@ -2502,21 +2667,8 @@ func (h *Handler) FetchRowNumber(ctx context.Context, tableName string, pkName s
|
||||
sortSQL = fmt.Sprintf("%s.%s ASC", tableName, pkName)
|
||||
}
|
||||
|
||||
// Build WHERE clauses from filters
|
||||
whereClauses := make([]string, 0)
|
||||
for i := range options.Filters {
|
||||
filter := &options.Filters[i]
|
||||
whereClause := h.buildFilterSQL(filter, tableName)
|
||||
if whereClause != "" {
|
||||
whereClauses = append(whereClauses, fmt.Sprintf("(%s)", whereClause))
|
||||
}
|
||||
}
|
||||
|
||||
// Combine WHERE clauses
|
||||
whereSQL := ""
|
||||
if len(whereClauses) > 0 {
|
||||
whereSQL = "WHERE " + strings.Join(whereClauses, " AND ")
|
||||
}
|
||||
// Build WHERE clause from filters with proper OR grouping
|
||||
whereSQL := h.buildWhereClauseWithORGrouping(options.Filters, tableName)
|
||||
|
||||
// Add custom SQL WHERE if provided
|
||||
if options.CustomSQLWhere != "" {
|
||||
@@ -2564,19 +2716,86 @@ func (h *Handler) FetchRowNumber(ctx context.Context, tableName string, pkName s
|
||||
var result []struct {
|
||||
RN int64 `bun:"rn"`
|
||||
}
|
||||
logger.Debug("[FetchRowNumber] BEFORE Query call - about to execute raw query")
|
||||
err := h.db.Query(ctx, &result, queryStr, pkValue)
|
||||
logger.Debug("[FetchRowNumber] AFTER Query call - query completed with %d results, err: %v", len(result), err)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to fetch row number: %w", err)
|
||||
}
|
||||
|
||||
if len(result) == 0 {
|
||||
return 0, fmt.Errorf("no row found for primary key %s", pkValue)
|
||||
whereInfo := "none"
|
||||
if whereSQL != "" {
|
||||
whereInfo = whereSQL
|
||||
}
|
||||
return 0, fmt.Errorf("no row found for primary key %s=%s with active filters: %s", pkName, pkValue, whereInfo)
|
||||
}
|
||||
|
||||
return result[0].RN, nil
|
||||
}
|
||||
|
||||
// buildFilterSQL converts a filter to SQL WHERE clause string
|
||||
// buildWhereClauseWithORGrouping builds a WHERE clause from filters with proper OR grouping
|
||||
// Groups consecutive OR filters together to ensure proper SQL precedence
|
||||
// Example: [A, B(OR), C(OR), D(AND)] => WHERE (A OR B OR C) AND D
|
||||
func (h *Handler) buildWhereClauseWithORGrouping(filters []common.FilterOption, tableName string) string {
|
||||
if len(filters) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
var groups []string
|
||||
i := 0
|
||||
|
||||
for i < len(filters) {
|
||||
// Check if this starts an OR group (next filter has OR logic)
|
||||
startORGroup := i+1 < len(filters) && strings.EqualFold(filters[i+1].LogicOperator, "OR")
|
||||
|
||||
if startORGroup {
|
||||
// Collect all consecutive filters that are OR'd together
|
||||
orGroup := []string{}
|
||||
|
||||
// Add current filter
|
||||
filterSQL := h.buildFilterSQL(&filters[i], tableName)
|
||||
if filterSQL != "" {
|
||||
orGroup = append(orGroup, filterSQL)
|
||||
}
|
||||
|
||||
// Collect remaining OR filters
|
||||
j := i + 1
|
||||
for j < len(filters) && strings.EqualFold(filters[j].LogicOperator, "OR") {
|
||||
filterSQL := h.buildFilterSQL(&filters[j], tableName)
|
||||
if filterSQL != "" {
|
||||
orGroup = append(orGroup, filterSQL)
|
||||
}
|
||||
j++
|
||||
}
|
||||
|
||||
// Group OR filters with parentheses
|
||||
if len(orGroup) > 0 {
|
||||
if len(orGroup) == 1 {
|
||||
groups = append(groups, orGroup[0])
|
||||
} else {
|
||||
groups = append(groups, "("+strings.Join(orGroup, " OR ")+")")
|
||||
}
|
||||
}
|
||||
i = j
|
||||
} else {
|
||||
// Single filter with AND logic (or first filter)
|
||||
filterSQL := h.buildFilterSQL(&filters[i], tableName)
|
||||
if filterSQL != "" {
|
||||
groups = append(groups, filterSQL)
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
if len(groups) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
return "WHERE " + strings.Join(groups, " AND ")
|
||||
}
|
||||
|
||||
func (h *Handler) buildFilterSQL(filter *common.FilterOption, tableName string) string {
|
||||
qualifiedColumn := h.qualifyColumnName(filter.Column, tableName)
|
||||
|
||||
@@ -2667,6 +2886,8 @@ func (h *Handler) filterExtendedOptions(validator *common.ColumnValidator, optio
|
||||
|
||||
// Filter base RequestOptions
|
||||
filtered.RequestOptions = validator.FilterRequestOptions(options.RequestOptions)
|
||||
// Restore JoinAliases cleared by FilterRequestOptions — still needed for SanitizeWhereClause
|
||||
filtered.RequestOptions.JoinAliases = options.JoinAliases
|
||||
|
||||
// Filter SearchColumns
|
||||
filtered.SearchColumns = validator.FilterValidColumns(options.SearchColumns)
|
||||
|
||||
@@ -49,6 +49,7 @@ type ExtendedRequestOptions struct {
|
||||
|
||||
// X-Files configuration - comprehensive query options as a single JSON object
|
||||
XFiles *XFiles
|
||||
XFilesPresent bool // Flag to indicate if X-Files header was provided
|
||||
}
|
||||
|
||||
// ExpandOption represents a relation expansion configuration
|
||||
@@ -273,7 +274,10 @@ func (h *Handler) parseOptionsFromHeaders(r common.Request, model interface{}) E
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve relation names (convert table names to field names) if model is provided
|
||||
// Resolve relation names (convert table names/prefixes to actual model field names) if model is provided.
|
||||
// This runs for both regular headers and X-Files, because XFile prefixes don't always match model
|
||||
// field names (e.g., prefix "HUB" vs field "HUB_RID_HUB"). RelatedKey/ForeignKey are used to
|
||||
// disambiguate when multiple fields point to the same related type.
|
||||
if model != nil {
|
||||
h.resolveRelationNamesInOptions(&options, model)
|
||||
}
|
||||
@@ -548,10 +552,8 @@ func (h *Handler) parseCustomSQLJoin(options *ExtendedRequestOptions, value stri
|
||||
// - "LEFT JOIN departments d ON ..." -> "d"
|
||||
// - "INNER JOIN users AS u ON ..." -> "u"
|
||||
// - "JOIN roles r ON ..." -> "r"
|
||||
// - "INNER JOIN LATERAL (...) fn ON true" -> "fn"
|
||||
func extractJoinAlias(joinClause string) string {
|
||||
// Pattern: JOIN table_name [AS] alias ON ...
|
||||
// We need to extract the alias (word before ON)
|
||||
|
||||
upperJoin := strings.ToUpper(joinClause)
|
||||
|
||||
// Find the "JOIN" keyword position
|
||||
@@ -560,7 +562,20 @@ func extractJoinAlias(joinClause string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Find the "ON" keyword position
|
||||
// Lateral joins: alias is the word after the closing ) and before ON
|
||||
if strings.Contains(upperJoin, "LATERAL") {
|
||||
lastClose := strings.LastIndex(joinClause, ")")
|
||||
if lastClose != -1 {
|
||||
words := strings.Fields(joinClause[lastClose+1:])
|
||||
// words should be like ["fn", "on", "true"] or ["on", "true"]
|
||||
if len(words) >= 1 && !strings.EqualFold(words[0], "on") {
|
||||
return words[0]
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Regular joins: find the "ON" keyword position (first occurrence)
|
||||
onIdx := strings.Index(upperJoin, " ON ")
|
||||
if onIdx == -1 {
|
||||
return ""
|
||||
@@ -693,6 +708,7 @@ func (h *Handler) parseXFiles(options *ExtendedRequestOptions, value string) {
|
||||
|
||||
// Store the original XFiles for reference
|
||||
options.XFiles = &xfiles
|
||||
options.XFilesPresent = true // Mark that X-Files header was provided
|
||||
|
||||
// Map XFiles fields to ExtendedRequestOptions
|
||||
|
||||
@@ -860,8 +876,21 @@ func (h *Handler) resolveRelationNamesInOptions(options *ExtendedRequestOptions,
|
||||
|
||||
// Resolve each part of the path
|
||||
currentModel := model
|
||||
for _, part := range parts {
|
||||
resolvedPart := h.resolveRelationName(currentModel, part)
|
||||
for partIdx, part := range parts {
|
||||
isLast := partIdx == len(parts)-1
|
||||
var resolvedPart string
|
||||
if isLast {
|
||||
// For the final part, use join-key-aware resolution to disambiguate when
|
||||
// multiple fields point to the same type (e.g., HUB_RID_HUB vs HUB_RID_ASSIGNEDTO).
|
||||
// RelatedKey = parent's local column linking to child; ForeignKey = local column linking to parent.
|
||||
localKey := preload.RelatedKey
|
||||
if localKey == "" {
|
||||
localKey = preload.ForeignKey
|
||||
}
|
||||
resolvedPart = h.resolveRelationNameWithJoinKey(currentModel, part, localKey)
|
||||
} else {
|
||||
resolvedPart = h.resolveRelationName(currentModel, part)
|
||||
}
|
||||
resolvedParts = append(resolvedParts, resolvedPart)
|
||||
|
||||
// Try to get the model type for the next level
|
||||
@@ -977,6 +1006,101 @@ func (h *Handler) resolveRelationName(model interface{}, nameOrTable string) str
|
||||
return nameOrTable
|
||||
}
|
||||
|
||||
// resolveRelationNameWithJoinKey resolves a relation name like resolveRelationName, but when
|
||||
// multiple fields point to the same related type, uses localKey to pick the one whose bun join
|
||||
// tag starts with "join:localKey=". Falls back to resolveRelationName if no key match is found.
|
||||
func (h *Handler) resolveRelationNameWithJoinKey(model interface{}, nameOrTable string, localKey string) string {
|
||||
if localKey == "" {
|
||||
return h.resolveRelationName(model, nameOrTable)
|
||||
}
|
||||
|
||||
modelType := reflect.TypeOf(model)
|
||||
if modelType == nil {
|
||||
return nameOrTable
|
||||
}
|
||||
if modelType.Kind() == reflect.Ptr {
|
||||
modelType = modelType.Elem()
|
||||
}
|
||||
if modelType == nil || modelType.Kind() != reflect.Struct {
|
||||
return nameOrTable
|
||||
}
|
||||
|
||||
// If it's already a direct field name, return as-is (no ambiguity).
|
||||
for i := 0; i < modelType.NumField(); i++ {
|
||||
if modelType.Field(i).Name == nameOrTable {
|
||||
return nameOrTable
|
||||
}
|
||||
}
|
||||
|
||||
normalizedInput := strings.ToLower(strings.ReplaceAll(nameOrTable, "_", ""))
|
||||
localKeyLower := strings.ToLower(localKey)
|
||||
|
||||
// Find all fields whose related type matches nameOrTable, then pick the one
|
||||
// whose bun join tag local key matches localKey.
|
||||
var fallbackField string
|
||||
for i := 0; i < modelType.NumField(); i++ {
|
||||
field := modelType.Field(i)
|
||||
fieldType := field.Type
|
||||
|
||||
var targetType reflect.Type
|
||||
if fieldType.Kind() == reflect.Slice {
|
||||
targetType = fieldType.Elem()
|
||||
} else if fieldType.Kind() == reflect.Ptr {
|
||||
targetType = fieldType.Elem()
|
||||
}
|
||||
if targetType != nil && targetType.Kind() == reflect.Ptr {
|
||||
targetType = targetType.Elem()
|
||||
}
|
||||
if targetType == nil || targetType.Kind() != reflect.Struct {
|
||||
continue
|
||||
}
|
||||
|
||||
normalizedTypeName := strings.ToLower(targetType.Name())
|
||||
normalizedTypeName = strings.TrimPrefix(normalizedTypeName, "modelcore")
|
||||
normalizedTypeName = strings.TrimPrefix(normalizedTypeName, "model")
|
||||
if normalizedTypeName != normalizedInput {
|
||||
continue
|
||||
}
|
||||
|
||||
// Type name matches; record as fallback.
|
||||
if fallbackField == "" {
|
||||
fallbackField = field.Name
|
||||
}
|
||||
|
||||
// Check bun join tag: "join:localKey=foreignKey"
|
||||
bunTag := field.Tag.Get("bun")
|
||||
for _, tagPart := range strings.Split(bunTag, ",") {
|
||||
tagPart = strings.TrimSpace(tagPart)
|
||||
if !strings.HasPrefix(tagPart, "join:") {
|
||||
continue
|
||||
}
|
||||
joinSpec := strings.TrimPrefix(tagPart, "join:")
|
||||
// joinSpec can be "col1=col2" or "col1=col2 col3=col4" (multi-col joins)
|
||||
joinCols := strings.Fields(joinSpec)
|
||||
if len(joinCols) == 0 {
|
||||
joinCols = []string{joinSpec}
|
||||
}
|
||||
for _, joinCol := range joinCols {
|
||||
eqIdx := strings.Index(joinCol, "=")
|
||||
if eqIdx < 0 {
|
||||
continue
|
||||
}
|
||||
joinLocalKey := strings.ToLower(joinCol[:eqIdx])
|
||||
if joinLocalKey == localKeyLower {
|
||||
logger.Debug("Resolved '%s' (localKey: %s) -> field '%s'", nameOrTable, localKey, field.Name)
|
||||
return field.Name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if fallbackField != "" {
|
||||
logger.Debug("No join key match for '%s' (localKey: %s), using first type match: '%s'", nameOrTable, localKey, fallbackField)
|
||||
return fallbackField
|
||||
}
|
||||
return h.resolveRelationName(model, nameOrTable)
|
||||
}
|
||||
|
||||
// addXFilesPreload converts an XFiles relation into a PreloadOption
|
||||
// and recursively processes its children
|
||||
func (h *Handler) addXFilesPreload(xfile *XFiles, options *ExtendedRequestOptions, basePath string) {
|
||||
@@ -984,11 +1108,33 @@ func (h *Handler) addXFilesPreload(xfile *XFiles, options *ExtendedRequestOption
|
||||
return
|
||||
}
|
||||
|
||||
// Store the table name as-is for now - it will be resolved to field name later
|
||||
// when we have the model instance available
|
||||
relationPath := xfile.TableName
|
||||
// Use the Prefix (e.g., "MAL") as the relation name, which matches the Go struct field name
|
||||
// Fall back to TableName if Prefix is not specified
|
||||
relationName := xfile.Prefix
|
||||
if relationName == "" {
|
||||
relationName = xfile.TableName
|
||||
}
|
||||
|
||||
// SPECIAL CASE: For recursive child tables, generate FK-based relation name
|
||||
// Example: If prefix is "MAL" and relatedkey is "rid_parentmastertaskitem",
|
||||
// the actual struct field is "MAL_RID_PARENTMASTERTASKITEM", not "MAL"
|
||||
if xfile.Recursive && xfile.RelatedKey != "" && basePath != "" {
|
||||
// Check if this is a self-referencing recursive relation (same table as parent)
|
||||
// by comparing the last part of basePath with the current prefix
|
||||
basePathParts := strings.Split(basePath, ".")
|
||||
lastPrefix := basePathParts[len(basePathParts)-1]
|
||||
|
||||
if lastPrefix == relationName {
|
||||
// This is a recursive self-reference, use FK-based name
|
||||
fkUpper := strings.ToUpper(xfile.RelatedKey)
|
||||
relationName = relationName + "_" + fkUpper
|
||||
logger.Debug("X-Files: Generated FK-based relation name for recursive table: %s", relationName)
|
||||
}
|
||||
}
|
||||
|
||||
relationPath := relationName
|
||||
if basePath != "" {
|
||||
relationPath = basePath + "." + xfile.TableName
|
||||
relationPath = basePath + "." + relationName
|
||||
}
|
||||
|
||||
logger.Debug("X-Files: Adding preload for relation: %s", relationPath)
|
||||
@@ -996,6 +1142,7 @@ func (h *Handler) addXFilesPreload(xfile *XFiles, options *ExtendedRequestOption
|
||||
// Create PreloadOption from XFiles configuration
|
||||
preloadOpt := common.PreloadOption{
|
||||
Relation: relationPath,
|
||||
TableName: xfile.TableName, // Store the actual database table name for WHERE clause processing
|
||||
Columns: xfile.Columns,
|
||||
OmitColumns: xfile.OmitColumns,
|
||||
}
|
||||
@@ -1035,15 +1182,42 @@ func (h *Handler) addXFilesPreload(xfile *XFiles, options *ExtendedRequestOption
|
||||
}
|
||||
}
|
||||
|
||||
// Transfer SqlJoins from XFiles to PreloadOption first, so aliases are available for WHERE sanitization
|
||||
if len(xfile.SqlJoins) > 0 {
|
||||
preloadOpt.SqlJoins = make([]string, 0, len(xfile.SqlJoins))
|
||||
preloadOpt.JoinAliases = make([]string, 0, len(xfile.SqlJoins))
|
||||
|
||||
for _, joinClause := range xfile.SqlJoins {
|
||||
// Sanitize the join clause
|
||||
sanitizedJoin := common.SanitizeWhereClause(joinClause, "", nil)
|
||||
if sanitizedJoin == "" {
|
||||
logger.Warn("X-Files: SqlJoin failed sanitization for %s: %s", relationPath, joinClause)
|
||||
continue
|
||||
}
|
||||
|
||||
preloadOpt.SqlJoins = append(preloadOpt.SqlJoins, sanitizedJoin)
|
||||
|
||||
// Extract join alias for validation
|
||||
alias := extractJoinAlias(sanitizedJoin)
|
||||
if alias != "" {
|
||||
preloadOpt.JoinAliases = append(preloadOpt.JoinAliases, alias)
|
||||
logger.Debug("X-Files: Extracted join alias for %s: %s", relationPath, alias)
|
||||
}
|
||||
}
|
||||
|
||||
logger.Debug("X-Files: Added %d SQL joins to preload %s", len(preloadOpt.SqlJoins), relationPath)
|
||||
}
|
||||
|
||||
// Add WHERE clause if SQL conditions specified
|
||||
// SqlJoins must be processed first so join aliases are known and not incorrectly replaced
|
||||
whereConditions := make([]string, 0)
|
||||
if len(xfile.SqlAnd) > 0 {
|
||||
// Process each SQL condition: add table prefixes and sanitize
|
||||
var sqlAndOpts *common.RequestOptions
|
||||
if len(preloadOpt.JoinAliases) > 0 {
|
||||
sqlAndOpts = &common.RequestOptions{JoinAliases: preloadOpt.JoinAliases}
|
||||
}
|
||||
for _, sqlCond := range xfile.SqlAnd {
|
||||
// First add table prefixes to unqualified columns
|
||||
prefixedCond := common.AddTablePrefixToColumns(sqlCond, xfile.TableName)
|
||||
// Then sanitize the condition
|
||||
sanitizedCond := common.SanitizeWhereClause(prefixedCond, xfile.TableName)
|
||||
sanitizedCond := common.SanitizeWhereClause(sqlCond, xfile.TableName, sqlAndOpts)
|
||||
if sanitizedCond != "" {
|
||||
whereConditions = append(whereConditions, sanitizedCond)
|
||||
}
|
||||
@@ -1088,13 +1262,46 @@ func (h *Handler) addXFilesPreload(xfile *XFiles, options *ExtendedRequestOption
|
||||
logger.Debug("X-Files: Set foreign key for %s: %s", relationPath, xfile.ForeignKey)
|
||||
}
|
||||
|
||||
// Check if this table has a recursive child - if so, mark THIS preload as recursive
|
||||
// and store the recursive child's RelatedKey for recursion generation
|
||||
hasRecursiveChild := false
|
||||
if len(xfile.ChildTables) > 0 {
|
||||
for _, childTable := range xfile.ChildTables {
|
||||
if childTable.Recursive && childTable.TableName == xfile.TableName {
|
||||
hasRecursiveChild = true
|
||||
preloadOpt.Recursive = true
|
||||
preloadOpt.RecursiveChildKey = childTable.RelatedKey
|
||||
logger.Debug("X-Files: Detected recursive child for %s, marking parent as recursive (recursive FK: %s)",
|
||||
relationPath, childTable.RelatedKey)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Skip adding this preload if it's a recursive child (it will be handled by parent's Recursive flag)
|
||||
if xfile.Recursive && basePath != "" {
|
||||
logger.Debug("X-Files: Skipping recursive child preload: %s (will be handled by parent)", relationPath)
|
||||
// Still process its parent/child tables for relations like DEF
|
||||
h.processXFilesRelations(xfile, options, relationPath)
|
||||
return
|
||||
}
|
||||
|
||||
// Add the preload option
|
||||
options.Preload = append(options.Preload, preloadOpt)
|
||||
logger.Debug("X-Files: Added preload [%d]: Relation=%s, Recursive=%v, RelatedKey=%s, RecursiveChildKey=%s, Where=%s",
|
||||
len(options.Preload)-1, preloadOpt.Relation, preloadOpt.Recursive, preloadOpt.RelatedKey, preloadOpt.RecursiveChildKey, preloadOpt.Where)
|
||||
|
||||
// Recursively process nested ParentTables and ChildTables
|
||||
if xfile.Recursive {
|
||||
logger.Debug("X-Files: Recursive preload enabled for: %s", relationPath)
|
||||
h.processXFilesRelations(xfile, options, relationPath)
|
||||
// Skip processing child tables if we already detected and handled a recursive child
|
||||
if hasRecursiveChild {
|
||||
logger.Debug("X-Files: Skipping child table processing for %s (recursive child already handled)", relationPath)
|
||||
// But still process parent tables
|
||||
if len(xfile.ParentTables) > 0 {
|
||||
logger.Debug("X-Files: Processing %d parent tables for %s", len(xfile.ParentTables), relationPath)
|
||||
for _, parentTable := range xfile.ParentTables {
|
||||
h.addXFilesPreload(parentTable, options, relationPath)
|
||||
}
|
||||
}
|
||||
} else if len(xfile.ParentTables) > 0 || len(xfile.ChildTables) > 0 {
|
||||
h.processXFilesRelations(xfile, options, relationPath)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,8 @@ package restheadspec
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
)
|
||||
|
||||
func TestDecodeHeaderValue(t *testing.T) {
|
||||
@@ -37,6 +39,131 @@ func TestDecodeHeaderValue(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddXFilesPreload_WithSqlJoins(t *testing.T) {
|
||||
handler := &Handler{}
|
||||
options := &ExtendedRequestOptions{
|
||||
RequestOptions: common.RequestOptions{
|
||||
Preload: make([]common.PreloadOption, 0),
|
||||
},
|
||||
}
|
||||
|
||||
// Create an XFiles with SqlJoins
|
||||
xfile := &XFiles{
|
||||
TableName: "users",
|
||||
SqlJoins: []string{
|
||||
"LEFT JOIN departments d ON d.id = users.department_id",
|
||||
"INNER JOIN roles r ON r.id = users.role_id",
|
||||
},
|
||||
FilterFields: []struct {
|
||||
Field string `json:"field"`
|
||||
Value string `json:"value"`
|
||||
Operator string `json:"operator"`
|
||||
}{
|
||||
{Field: "d.active", Value: "true", Operator: "eq"},
|
||||
{Field: "r.name", Value: "admin", Operator: "eq"},
|
||||
},
|
||||
}
|
||||
|
||||
// Add the XFiles preload
|
||||
handler.addXFilesPreload(xfile, options, "")
|
||||
|
||||
// Verify that a preload was added
|
||||
if len(options.Preload) != 1 {
|
||||
t.Fatalf("Expected 1 preload, got %d", len(options.Preload))
|
||||
}
|
||||
|
||||
preload := options.Preload[0]
|
||||
|
||||
// Verify relation name
|
||||
if preload.Relation != "users" {
|
||||
t.Errorf("Expected relation 'users', got '%s'", preload.Relation)
|
||||
}
|
||||
|
||||
// Verify SqlJoins were transferred
|
||||
if len(preload.SqlJoins) != 2 {
|
||||
t.Fatalf("Expected 2 SQL joins, got %d", len(preload.SqlJoins))
|
||||
}
|
||||
|
||||
// Verify JoinAliases were extracted
|
||||
if len(preload.JoinAliases) != 2 {
|
||||
t.Fatalf("Expected 2 join aliases, got %d", len(preload.JoinAliases))
|
||||
}
|
||||
|
||||
// Verify the aliases are correct
|
||||
expectedAliases := []string{"d", "r"}
|
||||
for i, expected := range expectedAliases {
|
||||
if preload.JoinAliases[i] != expected {
|
||||
t.Errorf("Expected alias '%s', got '%s'", expected, preload.JoinAliases[i])
|
||||
}
|
||||
}
|
||||
|
||||
// Verify filters were added
|
||||
if len(preload.Filters) != 2 {
|
||||
t.Fatalf("Expected 2 filters, got %d", len(preload.Filters))
|
||||
}
|
||||
|
||||
// Verify filter columns reference joined tables
|
||||
if preload.Filters[0].Column != "d.active" {
|
||||
t.Errorf("Expected filter column 'd.active', got '%s'", preload.Filters[0].Column)
|
||||
}
|
||||
if preload.Filters[1].Column != "r.name" {
|
||||
t.Errorf("Expected filter column 'r.name', got '%s'", preload.Filters[1].Column)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractJoinAlias(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
joinClause string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "LEFT JOIN with alias",
|
||||
joinClause: "LEFT JOIN departments d ON d.id = users.department_id",
|
||||
expected: "d",
|
||||
},
|
||||
{
|
||||
name: "INNER JOIN with AS keyword",
|
||||
joinClause: "INNER JOIN users AS u ON u.id = orders.user_id",
|
||||
expected: "u",
|
||||
},
|
||||
{
|
||||
name: "JOIN without alias",
|
||||
joinClause: "JOIN roles ON roles.id = users.role_id",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "Complex join with multiple conditions",
|
||||
joinClause: "LEFT OUTER JOIN products p ON p.id = items.product_id AND p.active = true",
|
||||
expected: "p",
|
||||
},
|
||||
{
|
||||
name: "Invalid join (no ON clause)",
|
||||
joinClause: "LEFT JOIN departments",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "LATERAL join with alias",
|
||||
joinClause: "inner join lateral (select sortorder from compute_fn(t.id)) fn on true",
|
||||
expected: "fn",
|
||||
},
|
||||
{
|
||||
name: "LATERAL join with multiline subquery containing inner ON",
|
||||
joinClause: "inner join lateral (\nselect string_agg(a.name, '.') as sortorder\nfrom tree(t.id) r\ninner join account a on a.id = r.id\n) fn on true",
|
||||
expected: "fn",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := extractJoinAlias(tt.joinClause)
|
||||
if result != tt.expected {
|
||||
t.Errorf("Expected alias '%s', got '%s'", tt.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Note: The following functions are unexported (lowercase) and cannot be tested directly:
|
||||
// - parseSelectFields
|
||||
// - parseFieldFilter
|
||||
|
||||
@@ -12,6 +12,10 @@ import (
|
||||
type HookType string
|
||||
|
||||
const (
|
||||
// BeforeHandle fires after model resolution, before operation dispatch.
|
||||
// Use this for auth checks that need model rules and user context simultaneously.
|
||||
BeforeHandle HookType = "before_handle"
|
||||
|
||||
// Read operation hooks
|
||||
BeforeRead HookType = "before_read"
|
||||
AfterRead HookType = "after_read"
|
||||
@@ -42,6 +46,9 @@ type HookContext struct {
|
||||
Model interface{}
|
||||
Options ExtendedRequestOptions
|
||||
|
||||
// Operation being dispatched (e.g. "read", "create", "update", "delete")
|
||||
Operation string
|
||||
|
||||
// Operation-specific fields
|
||||
ID string
|
||||
Data interface{} // For create/update operations
|
||||
@@ -56,6 +63,14 @@ type HookContext struct {
|
||||
// Response writer - allows hooks to modify response
|
||||
Writer common.ResponseWriter
|
||||
|
||||
// Request - the original HTTP request
|
||||
Request common.Request
|
||||
|
||||
// Allow hooks to abort the operation
|
||||
Abort bool // If set to true, the operation will be aborted
|
||||
AbortMessage string // Message to return if aborted
|
||||
AbortCode int // HTTP status code if aborted
|
||||
|
||||
// Tx provides access to the database/transaction for executing additional SQL
|
||||
// This allows hooks to run custom queries in addition to the main Query chain
|
||||
Tx common.Database
|
||||
@@ -110,6 +125,12 @@ func (r *HookRegistry) Execute(hookType HookType, ctx *HookContext) error {
|
||||
logger.Error("Hook %d for %s failed: %v", i+1, hookType, err)
|
||||
return fmt.Errorf("hook execution failed: %w", err)
|
||||
}
|
||||
|
||||
// Check if hook requested abort
|
||||
if ctx.Abort {
|
||||
logger.Warn("Hook %d for %s requested abort: %s", i+1, hookType, ctx.AbortMessage)
|
||||
return fmt.Errorf("operation aborted by hook: %s", ctx.AbortMessage)
|
||||
}
|
||||
}
|
||||
|
||||
// logger.Debug("All hooks for %s executed successfully", hookType)
|
||||
|
||||
110
pkg/restheadspec/preload_tablename_test.go
Normal file
110
pkg/restheadspec/preload_tablename_test.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package restheadspec
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
)
|
||||
|
||||
// TestPreloadOption_TableName verifies that TableName field is properly used
|
||||
// when provided in PreloadOption for WHERE clause processing
|
||||
func TestPreloadOption_TableName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
preload common.PreloadOption
|
||||
expectedTable string
|
||||
}{
|
||||
{
|
||||
name: "TableName provided explicitly",
|
||||
preload: common.PreloadOption{
|
||||
Relation: "MTL.MAL.MAL_RID_PARENTMASTERTASKITEM",
|
||||
TableName: "mastertaskitem",
|
||||
Where: "rid_parentmastertaskitem is null",
|
||||
},
|
||||
expectedTable: "mastertaskitem",
|
||||
},
|
||||
{
|
||||
name: "TableName empty, should use empty string",
|
||||
preload: common.PreloadOption{
|
||||
Relation: "MTL.MAL.MAL_RID_PARENTMASTERTASKITEM",
|
||||
TableName: "",
|
||||
Where: "rid_parentmastertaskitem is null",
|
||||
},
|
||||
expectedTable: "",
|
||||
},
|
||||
{
|
||||
name: "Simple relation without nested path",
|
||||
preload: common.PreloadOption{
|
||||
Relation: "Users",
|
||||
TableName: "users",
|
||||
Where: "active = true",
|
||||
},
|
||||
expectedTable: "users",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Test that the TableName field stores the correct value
|
||||
if tt.preload.TableName != tt.expectedTable {
|
||||
t.Errorf("PreloadOption.TableName = %q, want %q", tt.preload.TableName, tt.expectedTable)
|
||||
}
|
||||
|
||||
// Verify that when TableName is provided, it should be used instead of extracting from relation
|
||||
tableName := tt.preload.TableName
|
||||
if tableName == "" {
|
||||
// This simulates the fallback logic in handler.go
|
||||
// In reality, reflection.ExtractTableNameOnly would be called
|
||||
tableName = tt.expectedTable
|
||||
}
|
||||
|
||||
if tableName != tt.expectedTable {
|
||||
t.Errorf("Resolved table name = %q, want %q", tableName, tt.expectedTable)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestXFilesPreload_StoresTableName verifies that XFiles processing
|
||||
// stores the table name in PreloadOption and doesn't add table prefixes to WHERE clauses
|
||||
func TestXFilesPreload_StoresTableName(t *testing.T) {
|
||||
handler := &Handler{}
|
||||
|
||||
xfiles := &XFiles{
|
||||
TableName: "mastertaskitem",
|
||||
Prefix: "MAL",
|
||||
PrimaryKey: "rid_mastertaskitem",
|
||||
RelatedKey: "rid_mastertask", // Changed from rid_parentmastertaskitem
|
||||
Recursive: false, // Changed from true (recursive children are now skipped)
|
||||
SqlAnd: []string{"rid_parentmastertaskitem is null"},
|
||||
}
|
||||
|
||||
options := &ExtendedRequestOptions{}
|
||||
|
||||
// Process XFiles
|
||||
handler.addXFilesPreload(xfiles, options, "MTL")
|
||||
|
||||
// Verify that a preload was added
|
||||
if len(options.Preload) == 0 {
|
||||
t.Fatal("Expected at least one preload to be added")
|
||||
}
|
||||
|
||||
preload := options.Preload[0]
|
||||
|
||||
// Verify the table name is stored
|
||||
if preload.TableName != "mastertaskitem" {
|
||||
t.Errorf("PreloadOption.TableName = %q, want %q", preload.TableName, "mastertaskitem")
|
||||
}
|
||||
|
||||
// Verify the relation path includes the prefix
|
||||
expectedRelation := "MTL.MAL"
|
||||
if preload.Relation != expectedRelation {
|
||||
t.Errorf("PreloadOption.Relation = %q, want %q", preload.Relation, expectedRelation)
|
||||
}
|
||||
|
||||
// Verify WHERE clause does NOT have table prefix (prefixes only needed for JOINs)
|
||||
expectedWhere := "rid_parentmastertaskitem is null"
|
||||
if preload.Where != expectedWhere {
|
||||
t.Errorf("PreloadOption.Where = %q, want %q (no table prefix)", preload.Where, expectedWhere)
|
||||
}
|
||||
}
|
||||
91
pkg/restheadspec/preload_where_joins_test.go
Normal file
91
pkg/restheadspec/preload_where_joins_test.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package restheadspec
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestPreloadWhereClause_WithJoins verifies that table prefixes are added
|
||||
// to WHERE clauses when SqlJoins are present
|
||||
func TestPreloadWhereClause_WithJoins(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
where string
|
||||
sqlJoins []string
|
||||
expectedPrefix bool
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "No joins - no prefix needed",
|
||||
where: "status = 'active'",
|
||||
sqlJoins: []string{},
|
||||
expectedPrefix: false,
|
||||
description: "Without JOINs, Bun knows the table context",
|
||||
},
|
||||
{
|
||||
name: "Has joins - prefix needed",
|
||||
where: "status = 'active'",
|
||||
sqlJoins: []string{"LEFT JOIN other_table ot ON ot.id = main.other_id"},
|
||||
expectedPrefix: true,
|
||||
description: "With JOINs, table prefix disambiguates columns",
|
||||
},
|
||||
{
|
||||
name: "Already has prefix - no change",
|
||||
where: "users.status = 'active'",
|
||||
sqlJoins: []string{"LEFT JOIN roles r ON r.id = users.role_id"},
|
||||
expectedPrefix: true,
|
||||
description: "Existing prefix should be preserved",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// This test documents the expected behavior
|
||||
// The actual logic is in handler.go lines 916-937
|
||||
|
||||
hasJoins := len(tt.sqlJoins) > 0
|
||||
if hasJoins != tt.expectedPrefix {
|
||||
t.Errorf("Test expectation mismatch: hasJoins=%v, expectedPrefix=%v",
|
||||
hasJoins, tt.expectedPrefix)
|
||||
}
|
||||
|
||||
t.Logf("%s: %s", tt.name, tt.description)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestXFilesWithJoins_AddsTablePrefix verifies that XFiles with SqlJoins
|
||||
// results in table prefixes being added to WHERE clauses
|
||||
func TestXFilesWithJoins_AddsTablePrefix(t *testing.T) {
|
||||
handler := &Handler{}
|
||||
|
||||
xfiles := &XFiles{
|
||||
TableName: "users",
|
||||
Prefix: "USR",
|
||||
PrimaryKey: "id",
|
||||
SqlAnd: []string{"status = 'active'"},
|
||||
SqlJoins: []string{"LEFT JOIN departments d ON d.id = users.department_id"},
|
||||
}
|
||||
|
||||
options := &ExtendedRequestOptions{}
|
||||
handler.addXFilesPreload(xfiles, options, "")
|
||||
|
||||
if len(options.Preload) == 0 {
|
||||
t.Fatal("Expected at least one preload to be added")
|
||||
}
|
||||
|
||||
preload := options.Preload[0]
|
||||
|
||||
// Verify SqlJoins were stored
|
||||
if len(preload.SqlJoins) != 1 {
|
||||
t.Errorf("Expected 1 SqlJoin, got %d", len(preload.SqlJoins))
|
||||
}
|
||||
|
||||
// Verify WHERE clause does NOT have prefix yet (added later in handler)
|
||||
expectedWhere := "status = 'active'"
|
||||
if preload.Where != expectedWhere {
|
||||
t.Errorf("PreloadOption.Where = %q, want %q", preload.Where, expectedWhere)
|
||||
}
|
||||
|
||||
// Note: The handler will add the prefix when it sees SqlJoins
|
||||
// This is tested in the handler itself, not during XFiles parsing
|
||||
}
|
||||
391
pkg/restheadspec/recursive_preload_test.go
Normal file
391
pkg/restheadspec/recursive_preload_test.go
Normal file
@@ -0,0 +1,391 @@
|
||||
//go:build !integration
|
||||
// +build !integration
|
||||
|
||||
package restheadspec
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
)
|
||||
|
||||
// TestRecursivePreloadClearsWhereClause tests that recursive preloads
|
||||
// correctly clear the WHERE clause from the parent level to allow
|
||||
// Bun to use foreign key relationships for loading children
|
||||
func TestRecursivePreloadClearsWhereClause(t *testing.T) {
|
||||
// Create a mock handler
|
||||
handler := &Handler{}
|
||||
|
||||
// Create a preload option with a WHERE clause that filters root items
|
||||
// This simulates the xfiles use case where the first level has a filter
|
||||
// like "rid_parentmastertaskitem is null" to get root items
|
||||
preload := common.PreloadOption{
|
||||
Relation: "MastertaskItems",
|
||||
Recursive: true,
|
||||
RelatedKey: "rid_parentmastertaskitem",
|
||||
Where: "rid_parentmastertaskitem is null",
|
||||
Filters: []common.FilterOption{
|
||||
{
|
||||
Column: "rid_parentmastertaskitem",
|
||||
Operator: "is null",
|
||||
Value: nil,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create a mock query that tracks operations
|
||||
mockQuery := &mockSelectQuery{
|
||||
operations: []string{},
|
||||
}
|
||||
|
||||
// Apply the recursive preload at depth 0
|
||||
// This should:
|
||||
// 1. Apply the initial preload with the WHERE clause
|
||||
// 2. Create a recursive preload without the WHERE clause
|
||||
allPreloads := []common.PreloadOption{preload}
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 0)
|
||||
|
||||
// Verify the mock query received the operations
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
// Check that we have at least 2 PreloadRelation calls:
|
||||
// 1. The initial "MastertaskItems" with WHERE clause
|
||||
// 2. The recursive "MastertaskItems.MastertaskItems_RID_PARENTMASTERTASKITEM" without WHERE clause
|
||||
preloadCount := 0
|
||||
recursivePreloadFound := false
|
||||
whereAppliedToRecursive := false
|
||||
|
||||
for _, op := range mock.operations {
|
||||
if op == "PreloadRelation:MastertaskItems" {
|
||||
preloadCount++
|
||||
}
|
||||
if op == "PreloadRelation:MastertaskItems.MastertaskItems_RID_PARENTMASTERTASKITEM" {
|
||||
recursivePreloadFound = true
|
||||
}
|
||||
// Check if WHERE was applied to the recursive preload (it shouldn't be)
|
||||
if op == "Where:rid_parentmastertaskitem is null" && recursivePreloadFound {
|
||||
whereAppliedToRecursive = true
|
||||
}
|
||||
}
|
||||
|
||||
if preloadCount < 1 {
|
||||
t.Errorf("Expected at least 1 PreloadRelation call, got %d", preloadCount)
|
||||
}
|
||||
|
||||
if !recursivePreloadFound {
|
||||
t.Errorf("Expected recursive preload 'MastertaskItems.MastertaskItems_RID_PARENTMASTERTASKITEM' to be created. Operations: %v", mock.operations)
|
||||
}
|
||||
|
||||
if whereAppliedToRecursive {
|
||||
t.Error("WHERE clause should not be applied to recursive preload levels")
|
||||
}
|
||||
}
|
||||
|
||||
// TestRecursivePreloadWithChildRelations tests that child relations
|
||||
// (like DEF in MAL.DEF) are properly extended to recursive levels
|
||||
func TestRecursivePreloadWithChildRelations(t *testing.T) {
|
||||
handler := &Handler{}
|
||||
|
||||
// Create the main recursive preload
|
||||
recursivePreload := common.PreloadOption{
|
||||
Relation: "MAL",
|
||||
Recursive: true,
|
||||
RelatedKey: "rid_parentmastertaskitem",
|
||||
Where: "rid_parentmastertaskitem is null",
|
||||
}
|
||||
|
||||
// Create a child relation that should be extended
|
||||
childPreload := common.PreloadOption{
|
||||
Relation: "MAL.DEF",
|
||||
}
|
||||
|
||||
mockQuery := &mockSelectQuery{
|
||||
operations: []string{},
|
||||
}
|
||||
|
||||
allPreloads := []common.PreloadOption{recursivePreload, childPreload}
|
||||
|
||||
// Apply both preloads - the child preload should be extended when the recursive one processes
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, recursivePreload, allPreloads, nil, 0)
|
||||
|
||||
// Also need to apply the child preload separately (as would happen in normal flow)
|
||||
result = handler.applyPreloadWithRecursion(result, childPreload, allPreloads, nil, 0)
|
||||
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
// Check that the child relation was extended to recursive levels
|
||||
// We should see:
|
||||
// - MAL (with WHERE)
|
||||
// - MAL.DEF
|
||||
// - MAL.MAL_RID_PARENTMASTERTASKITEM (without WHERE)
|
||||
// - MAL.MAL_RID_PARENTMASTERTASKITEM.DEF (extended by recursive logic)
|
||||
foundMALDEF := false
|
||||
foundRecursiveMAL := false
|
||||
foundMALMALDEF := false
|
||||
|
||||
for _, op := range mock.operations {
|
||||
if op == "PreloadRelation:MAL.DEF" {
|
||||
foundMALDEF = true
|
||||
}
|
||||
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
|
||||
foundRecursiveMAL = true
|
||||
}
|
||||
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM.DEF" {
|
||||
foundMALMALDEF = true
|
||||
}
|
||||
}
|
||||
|
||||
if !foundMALDEF {
|
||||
t.Errorf("Expected child preload 'MAL.DEF' to be applied. Operations: %v", mock.operations)
|
||||
}
|
||||
|
||||
if !foundRecursiveMAL {
|
||||
t.Errorf("Expected recursive preload 'MAL.MAL_RID_PARENTMASTERTASKITEM' to be created. Operations: %v", mock.operations)
|
||||
}
|
||||
|
||||
if !foundMALMALDEF {
|
||||
t.Errorf("Expected child preload to be extended to 'MAL.MAL_RID_PARENTMASTERTASKITEM.DEF' at recursive level. Operations: %v", mock.operations)
|
||||
}
|
||||
}
|
||||
|
||||
// TestRecursivePreloadGeneratesCorrectRelationName tests that the recursive
|
||||
// preload generates the correct FK-based relation name using RelatedKey
|
||||
func TestRecursivePreloadGeneratesCorrectRelationName(t *testing.T) {
|
||||
handler := &Handler{}
|
||||
|
||||
// Test case 1: With RelatedKey - should generate FK-based name
|
||||
t.Run("WithRelatedKey", func(t *testing.T) {
|
||||
preload := common.PreloadOption{
|
||||
Relation: "MAL",
|
||||
Recursive: true,
|
||||
RelatedKey: "rid_parentmastertaskitem",
|
||||
}
|
||||
|
||||
mockQuery := &mockSelectQuery{operations: []string{}}
|
||||
allPreloads := []common.PreloadOption{preload}
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 0)
|
||||
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
// Should generate MAL.MAL_RID_PARENTMASTERTASKITEM
|
||||
foundCorrectRelation := false
|
||||
foundIncorrectRelation := false
|
||||
|
||||
for _, op := range mock.operations {
|
||||
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
|
||||
foundCorrectRelation = true
|
||||
}
|
||||
if op == "PreloadRelation:MAL.MAL" {
|
||||
foundIncorrectRelation = true
|
||||
}
|
||||
}
|
||||
|
||||
if !foundCorrectRelation {
|
||||
t.Errorf("Expected 'MAL.MAL_RID_PARENTMASTERTASKITEM' relation, operations: %v", mock.operations)
|
||||
}
|
||||
|
||||
if foundIncorrectRelation {
|
||||
t.Error("Should NOT generate 'MAL.MAL' relation when RelatedKey is specified")
|
||||
}
|
||||
})
|
||||
|
||||
// Test case 2: Without RelatedKey - should fallback to old behavior
|
||||
t.Run("WithoutRelatedKey", func(t *testing.T) {
|
||||
preload := common.PreloadOption{
|
||||
Relation: "MAL",
|
||||
Recursive: true,
|
||||
// No RelatedKey
|
||||
}
|
||||
|
||||
mockQuery := &mockSelectQuery{operations: []string{}}
|
||||
allPreloads := []common.PreloadOption{preload}
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 0)
|
||||
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
// Should fallback to MAL.MAL
|
||||
foundFallback := false
|
||||
for _, op := range mock.operations {
|
||||
if op == "PreloadRelation:MAL.MAL" {
|
||||
foundFallback = true
|
||||
}
|
||||
}
|
||||
|
||||
if !foundFallback {
|
||||
t.Errorf("Expected fallback 'MAL.MAL' relation when no RelatedKey, operations: %v", mock.operations)
|
||||
}
|
||||
})
|
||||
|
||||
// Test case 3: Depth limit of 8
|
||||
t.Run("DepthLimit", func(t *testing.T) {
|
||||
preload := common.PreloadOption{
|
||||
Relation: "MAL",
|
||||
Recursive: true,
|
||||
RelatedKey: "rid_parentmastertaskitem",
|
||||
}
|
||||
|
||||
mockQuery := &mockSelectQuery{operations: []string{}}
|
||||
allPreloads := []common.PreloadOption{preload}
|
||||
|
||||
// Start at depth 7 - should create one more level
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 7)
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
foundDepth8 := false
|
||||
for _, op := range mock.operations {
|
||||
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
|
||||
foundDepth8 = true
|
||||
}
|
||||
}
|
||||
|
||||
if !foundDepth8 {
|
||||
t.Error("Expected to create recursive level at depth 8")
|
||||
}
|
||||
|
||||
// Start at depth 8 - should NOT create another level
|
||||
mockQuery2 := &mockSelectQuery{operations: []string{}}
|
||||
result2 := handler.applyPreloadWithRecursion(mockQuery2, preload, allPreloads, nil, 8)
|
||||
mock2 := result2.(*mockSelectQuery)
|
||||
|
||||
foundDepth9 := false
|
||||
for _, op := range mock2.operations {
|
||||
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
|
||||
foundDepth9 = true
|
||||
}
|
||||
}
|
||||
|
||||
if foundDepth9 {
|
||||
t.Error("Should NOT create recursive level beyond depth 8")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// mockSelectQuery implements common.SelectQuery for testing
|
||||
type mockSelectQuery struct {
|
||||
operations []string
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Model(model interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Model")
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Table(table string) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Table:"+table)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Column(columns ...string) common.SelectQuery {
|
||||
for _, col := range columns {
|
||||
m.operations = append(m.operations, "Column:"+col)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) ColumnExpr(query string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "ColumnExpr:"+query)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Where(query string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Where:"+query)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) WhereOr(query string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "WhereOr:"+query)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) WhereIn(column string, values interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "WhereIn:"+column)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Order(order string) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Order:"+order)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) OrderExpr(order string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "OrderExpr:"+order)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Limit(limit int) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Limit")
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Offset(offset int) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Offset")
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Join(join string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Join:"+join)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) LeftJoin(join string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "LeftJoin:"+join)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Group(columns string) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Group")
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Having(query string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Having:"+query)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Preload(relation string, conditions ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Preload:"+relation)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) PreloadRelation(relation string, apply ...func(common.SelectQuery) common.SelectQuery) common.SelectQuery {
|
||||
m.operations = append(m.operations, "PreloadRelation:"+relation)
|
||||
// Apply the preload modifiers
|
||||
for _, fn := range apply {
|
||||
fn(m)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) JoinRelation(relation string, apply ...func(common.SelectQuery) common.SelectQuery) common.SelectQuery {
|
||||
m.operations = append(m.operations, "JoinRelation:"+relation)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Scan(ctx context.Context, dest interface{}) error {
|
||||
m.operations = append(m.operations, "Scan")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) ScanModel(ctx context.Context) error {
|
||||
m.operations = append(m.operations, "ScanModel")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Count(ctx context.Context) (int, error) {
|
||||
m.operations = append(m.operations, "Count")
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Exists(ctx context.Context) (bool, error) {
|
||||
m.operations = append(m.operations, "Exists")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) GetUnderlyingQuery() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) GetModel() interface{} {
|
||||
return nil
|
||||
}
|
||||
@@ -125,17 +125,17 @@ func SetupMuxRoutes(muxRouter *mux.Router, handler *Handler, authMiddleware Midd
|
||||
metadataPath := buildRoutePath(schema, entity) + "/metadata"
|
||||
|
||||
// Create handler functions for this specific entity
|
||||
entityHandler := createMuxHandler(handler, schema, entity, "")
|
||||
entityWithIDHandler := createMuxHandler(handler, schema, entity, "id")
|
||||
metadataHandler := createMuxGetHandler(handler, schema, entity, "")
|
||||
var entityHandler http.Handler = createMuxHandler(handler, schema, entity, "")
|
||||
var entityWithIDHandler http.Handler = createMuxHandler(handler, schema, entity, "id")
|
||||
var metadataHandler http.Handler = createMuxGetHandler(handler, schema, entity, "")
|
||||
optionsEntityHandler := createMuxOptionsHandler(handler, schema, entity, []string{"GET", "POST", "OPTIONS"})
|
||||
optionsEntityWithIDHandler := createMuxOptionsHandler(handler, schema, entity, []string{"GET", "PUT", "PATCH", "DELETE", "POST", "OPTIONS"})
|
||||
|
||||
// Apply authentication middleware if provided
|
||||
if authMiddleware != nil {
|
||||
entityHandler = authMiddleware(entityHandler).(http.HandlerFunc)
|
||||
entityWithIDHandler = authMiddleware(entityWithIDHandler).(http.HandlerFunc)
|
||||
metadataHandler = authMiddleware(metadataHandler).(http.HandlerFunc)
|
||||
entityHandler = authMiddleware(entityHandler)
|
||||
entityWithIDHandler = authMiddleware(entityWithIDHandler)
|
||||
metadataHandler = authMiddleware(metadataHandler)
|
||||
// Don't apply auth middleware to OPTIONS - CORS preflight must not require auth
|
||||
}
|
||||
|
||||
@@ -280,9 +280,34 @@ type BunRouterHandler interface {
|
||||
Handle(method, path string, handler bunrouter.HandlerFunc)
|
||||
}
|
||||
|
||||
// wrapBunRouterHandler wraps a bunrouter handler with auth middleware if provided
|
||||
func wrapBunRouterHandler(handler bunrouter.HandlerFunc, authMiddleware MiddlewareFunc) bunrouter.HandlerFunc {
|
||||
if authMiddleware == nil {
|
||||
return handler
|
||||
}
|
||||
|
||||
return func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
// Create an http.Handler that calls the bunrouter handler
|
||||
httpHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Replace the embedded *http.Request with the middleware-enriched one
|
||||
// so that auth context (user ID, etc.) is visible to the handler.
|
||||
enrichedReq := req
|
||||
enrichedReq.Request = r
|
||||
_ = handler(w, enrichedReq)
|
||||
})
|
||||
|
||||
// Wrap with auth middleware and execute
|
||||
wrappedHandler := authMiddleware(httpHandler)
|
||||
wrappedHandler.ServeHTTP(w, req.Request)
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// SetupBunRouterRoutes sets up bunrouter routes for the RestHeadSpec API
|
||||
// Accepts bunrouter.Router or bunrouter.Group
|
||||
func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
// authMiddleware is optional - if provided, routes will be protected with the middleware
|
||||
func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler, authMiddleware MiddlewareFunc) {
|
||||
|
||||
// CORS config
|
||||
corsConfig := common.DefaultCORSConfig()
|
||||
@@ -292,6 +317,14 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewBunRouterRequest(req)
|
||||
common.SetCORSHeaders(respAdapter, reqAdapter, corsConfig)
|
||||
handler.HandleOpenAPI(respAdapter, reqAdapter)
|
||||
return nil
|
||||
})
|
||||
|
||||
r.Handle("OPTIONS", "/openapi", func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewHTTPRequest(req.Request)
|
||||
common.SetCORSHeaders(respAdapter, reqAdapter, corsConfig)
|
||||
return nil
|
||||
})
|
||||
|
||||
@@ -313,7 +346,7 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
currentEntity := entity
|
||||
|
||||
// GET and POST for /{schema}/{entity}
|
||||
r.Handle("GET", entityPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
getEntityHandler := func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewBunRouterRequest(req)
|
||||
common.SetCORSHeaders(respAdapter, reqAdapter, corsConfig)
|
||||
@@ -324,9 +357,10 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
|
||||
handler.Handle(respAdapter, reqAdapter, params)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
r.Handle("GET", entityPath, wrapBunRouterHandler(getEntityHandler, authMiddleware))
|
||||
|
||||
r.Handle("POST", entityPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
postEntityHandler := func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewBunRouterRequest(req)
|
||||
common.SetCORSHeaders(respAdapter, reqAdapter, corsConfig)
|
||||
@@ -337,10 +371,11 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
|
||||
handler.Handle(respAdapter, reqAdapter, params)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
r.Handle("POST", entityPath, wrapBunRouterHandler(postEntityHandler, authMiddleware))
|
||||
|
||||
// GET, POST, PUT, PATCH, DELETE for /{schema}/{entity}/:id
|
||||
r.Handle("GET", entityWithIDPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
getEntityWithIDHandler := func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewBunRouterRequest(req)
|
||||
common.SetCORSHeaders(respAdapter, reqAdapter, corsConfig)
|
||||
@@ -352,9 +387,10 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
|
||||
handler.Handle(respAdapter, reqAdapter, params)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
r.Handle("GET", entityWithIDPath, wrapBunRouterHandler(getEntityWithIDHandler, authMiddleware))
|
||||
|
||||
r.Handle("POST", entityWithIDPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
postEntityWithIDHandler := func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewBunRouterRequest(req)
|
||||
common.SetCORSHeaders(respAdapter, reqAdapter, corsConfig)
|
||||
@@ -366,9 +402,10 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
|
||||
handler.Handle(respAdapter, reqAdapter, params)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
r.Handle("POST", entityWithIDPath, wrapBunRouterHandler(postEntityWithIDHandler, authMiddleware))
|
||||
|
||||
r.Handle("PUT", entityWithIDPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
putEntityWithIDHandler := func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewBunRouterRequest(req)
|
||||
common.SetCORSHeaders(respAdapter, reqAdapter, corsConfig)
|
||||
@@ -380,9 +417,10 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
|
||||
handler.Handle(respAdapter, reqAdapter, params)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
r.Handle("PUT", entityWithIDPath, wrapBunRouterHandler(putEntityWithIDHandler, authMiddleware))
|
||||
|
||||
r.Handle("PATCH", entityWithIDPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
patchEntityWithIDHandler := func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewBunRouterRequest(req)
|
||||
common.SetCORSHeaders(respAdapter, reqAdapter, corsConfig)
|
||||
@@ -394,9 +432,10 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
|
||||
handler.Handle(respAdapter, reqAdapter, params)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
r.Handle("PATCH", entityWithIDPath, wrapBunRouterHandler(patchEntityWithIDHandler, authMiddleware))
|
||||
|
||||
r.Handle("DELETE", entityWithIDPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
deleteEntityWithIDHandler := func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewBunRouterRequest(req)
|
||||
common.SetCORSHeaders(respAdapter, reqAdapter, corsConfig)
|
||||
@@ -408,10 +447,11 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
|
||||
handler.Handle(respAdapter, reqAdapter, params)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
r.Handle("DELETE", entityWithIDPath, wrapBunRouterHandler(deleteEntityWithIDHandler, authMiddleware))
|
||||
|
||||
// Metadata endpoint
|
||||
r.Handle("GET", metadataPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
metadataHandler := func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewBunRouterRequest(req)
|
||||
common.SetCORSHeaders(respAdapter, reqAdapter, corsConfig)
|
||||
@@ -422,9 +462,11 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
|
||||
handler.HandleGet(respAdapter, reqAdapter, params)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
r.Handle("GET", metadataPath, wrapBunRouterHandler(metadataHandler, authMiddleware))
|
||||
|
||||
// OPTIONS route without ID (returns metadata)
|
||||
// Don't apply auth middleware to OPTIONS - CORS preflight must not require auth
|
||||
r.Handle("OPTIONS", entityPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewBunRouterRequest(req)
|
||||
@@ -441,6 +483,7 @@ func SetupBunRouterRoutes(r BunRouterHandler, handler *Handler) {
|
||||
})
|
||||
|
||||
// OPTIONS route with ID (returns metadata)
|
||||
// Don't apply auth middleware to OPTIONS - CORS preflight must not require auth
|
||||
r.Handle("OPTIONS", entityWithIDPath, func(w http.ResponseWriter, req bunrouter.Request) error {
|
||||
respAdapter := router.NewHTTPResponseWriter(w)
|
||||
reqAdapter := router.NewBunRouterRequest(req)
|
||||
@@ -466,8 +509,8 @@ func ExampleBunRouterWithBunDB(bunDB *bun.DB) {
|
||||
// Create bunrouter
|
||||
bunRouter := bunrouter.New()
|
||||
|
||||
// Setup routes
|
||||
SetupBunRouterRoutes(bunRouter, handler)
|
||||
// Setup routes without authentication
|
||||
SetupBunRouterRoutes(bunRouter, handler, nil)
|
||||
|
||||
// Start server
|
||||
if err := http.ListenAndServe(":8080", bunRouter); err != nil {
|
||||
@@ -487,7 +530,7 @@ func ExampleBunRouterWithGroup(bunDB *bun.DB) {
|
||||
apiGroup := bunRouter.NewGroup("/api")
|
||||
|
||||
// Setup RestHeadSpec routes on the group - routes will be under /api
|
||||
SetupBunRouterRoutes(apiGroup, handler)
|
||||
SetupBunRouterRoutes(apiGroup, handler, nil)
|
||||
|
||||
// Start server
|
||||
if err := http.ListenAndServe(":8080", bunRouter); err != nil {
|
||||
|
||||
@@ -2,6 +2,7 @@ package restheadspec
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/security"
|
||||
@@ -9,6 +10,17 @@ import (
|
||||
|
||||
// RegisterSecurityHooks registers all security-related hooks with the handler
|
||||
func RegisterSecurityHooks(handler *Handler, securityList *security.SecurityList) {
|
||||
// Hook 0: BeforeHandle - enforce auth after model resolution
|
||||
handler.Hooks().Register(BeforeHandle, func(hookCtx *HookContext) error {
|
||||
if err := security.CheckModelAuthAllowed(newSecurityContext(hookCtx), hookCtx.Operation); err != nil {
|
||||
hookCtx.Abort = true
|
||||
hookCtx.AbortMessage = err.Error()
|
||||
hookCtx.AbortCode = http.StatusUnauthorized
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// Hook 1: BeforeRead - Load security rules
|
||||
handler.Hooks().Register(BeforeRead, func(hookCtx *HookContext) error {
|
||||
secCtx := newSecurityContext(hookCtx)
|
||||
@@ -33,6 +45,18 @@ func RegisterSecurityHooks(handler *Handler, securityList *security.SecurityList
|
||||
return security.LogDataAccess(secCtx)
|
||||
})
|
||||
|
||||
// Hook 5: BeforeUpdate - enforce CanUpdate rule from context/registry
|
||||
handler.Hooks().Register(BeforeUpdate, func(hookCtx *HookContext) error {
|
||||
secCtx := newSecurityContext(hookCtx)
|
||||
return security.CheckModelUpdateAllowed(secCtx)
|
||||
})
|
||||
|
||||
// Hook 6: BeforeDelete - enforce CanDelete rule from context/registry
|
||||
handler.Hooks().Register(BeforeDelete, func(hookCtx *HookContext) error {
|
||||
secCtx := newSecurityContext(hookCtx)
|
||||
return security.CheckModelDeleteAllowed(secCtx)
|
||||
})
|
||||
|
||||
logger.Info("Security hooks registered for restheadspec handler")
|
||||
}
|
||||
|
||||
|
||||
527
pkg/restheadspec/xfiles_integration_test.go
Normal file
527
pkg/restheadspec/xfiles_integration_test.go
Normal file
@@ -0,0 +1,527 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package restheadspec
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// mockSelectQuery implements common.SelectQuery for testing (integration version)
|
||||
type mockSelectQuery struct {
|
||||
operations []string
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Model(model interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Model")
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Table(table string) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Table:"+table)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Column(columns ...string) common.SelectQuery {
|
||||
for _, col := range columns {
|
||||
m.operations = append(m.operations, "Column:"+col)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) ColumnExpr(query string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "ColumnExpr:"+query)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Where(query string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Where:"+query)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) WhereOr(query string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "WhereOr:"+query)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) WhereIn(column string, values interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "WhereIn:"+column)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Order(order string) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Order:"+order)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) OrderExpr(order string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "OrderExpr:"+order)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Limit(limit int) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Limit")
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Offset(offset int) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Offset")
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Join(join string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Join:"+join)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) LeftJoin(join string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "LeftJoin:"+join)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Group(columns string) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Group")
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Having(query string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Having:"+query)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Preload(relation string, conditions ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Preload:"+relation)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) PreloadRelation(relation string, apply ...func(common.SelectQuery) common.SelectQuery) common.SelectQuery {
|
||||
m.operations = append(m.operations, "PreloadRelation:"+relation)
|
||||
// Apply the preload modifiers
|
||||
for _, fn := range apply {
|
||||
fn(m)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) JoinRelation(relation string, apply ...func(common.SelectQuery) common.SelectQuery) common.SelectQuery {
|
||||
m.operations = append(m.operations, "JoinRelation:"+relation)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Scan(ctx context.Context, dest interface{}) error {
|
||||
m.operations = append(m.operations, "Scan")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) ScanModel(ctx context.Context) error {
|
||||
m.operations = append(m.operations, "ScanModel")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Count(ctx context.Context) (int, error) {
|
||||
m.operations = append(m.operations, "Count")
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Exists(ctx context.Context) (bool, error) {
|
||||
m.operations = append(m.operations, "Exists")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) GetUnderlyingQuery() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) GetModel() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TestXFilesRecursivePreload is an integration test that validates the XFiles
|
||||
// recursive preload functionality using real test data files.
|
||||
//
|
||||
// This test ensures:
|
||||
// 1. XFiles request JSON is correctly parsed into PreloadOptions
|
||||
// 2. Recursive preload generates correct FK-based relation names (MAL_RID_PARENTMASTERTASKITEM)
|
||||
// 3. Parent WHERE clauses don't leak to child levels
|
||||
// 4. Child relations (like DEF) are extended to all recursive levels
|
||||
// 5. Hierarchical data structure matches expected output
|
||||
func TestXFilesRecursivePreload(t *testing.T) {
|
||||
// Load the XFiles request configuration
|
||||
requestPath := filepath.Join("..", "..", "tests", "data", "xfiles.request.json")
|
||||
requestData, err := os.ReadFile(requestPath)
|
||||
require.NoError(t, err, "Failed to read xfiles.request.json")
|
||||
|
||||
var xfileConfig XFiles
|
||||
err = json.Unmarshal(requestData, &xfileConfig)
|
||||
require.NoError(t, err, "Failed to parse xfiles.request.json")
|
||||
|
||||
// Create handler and parse XFiles into PreloadOptions
|
||||
handler := &Handler{}
|
||||
options := &ExtendedRequestOptions{
|
||||
RequestOptions: common.RequestOptions{
|
||||
Preload: []common.PreloadOption{},
|
||||
},
|
||||
}
|
||||
|
||||
// Process the XFiles configuration - start with the root table
|
||||
handler.processXFilesRelations(&xfileConfig, options, "")
|
||||
|
||||
// Verify that preload options were created
|
||||
require.NotEmpty(t, options.Preload, "Expected preload options to be created")
|
||||
|
||||
// Test 1: Verify mastertaskitem preload is marked as recursive with correct RelatedKey
|
||||
t.Run("RecursivePreloadHasRelatedKey", func(t *testing.T) {
|
||||
// Find the mastertaskitem preload - it should be marked as recursive
|
||||
var recursivePreload *common.PreloadOption
|
||||
for i := range options.Preload {
|
||||
preload := &options.Preload[i]
|
||||
if preload.Relation == "MTL.MAL" && preload.Recursive {
|
||||
recursivePreload = preload
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
require.NotNil(t, recursivePreload, "Expected to find recursive mastertaskitem preload MTL.MAL")
|
||||
|
||||
// RelatedKey should be the parent relationship key (MTL -> MAL)
|
||||
assert.Equal(t, "rid_mastertask", recursivePreload.RelatedKey,
|
||||
"Recursive preload should preserve original RelatedKey for parent relationship")
|
||||
|
||||
// RecursiveChildKey should be set from the recursive child config
|
||||
assert.Equal(t, "rid_parentmastertaskitem", recursivePreload.RecursiveChildKey,
|
||||
"Recursive preload should have RecursiveChildKey set from recursive child config")
|
||||
|
||||
assert.True(t, recursivePreload.Recursive, "mastertaskitem preload should be marked as recursive")
|
||||
})
|
||||
|
||||
// Test 2: Verify mastertaskitem has WHERE clause for filtering root items
|
||||
t.Run("RootLevelHasWhereClause", func(t *testing.T) {
|
||||
var rootPreload *common.PreloadOption
|
||||
for i := range options.Preload {
|
||||
preload := &options.Preload[i]
|
||||
if preload.Relation == "MTL.MAL" {
|
||||
rootPreload = preload
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
require.NotNil(t, rootPreload, "Expected to find mastertaskitem preload")
|
||||
assert.NotEmpty(t, rootPreload.Where, "Mastertaskitem should have WHERE clause")
|
||||
// The WHERE clause should filter for root items (rid_parentmastertaskitem is null)
|
||||
assert.True(t, rootPreload.Recursive, "Mastertaskitem preload should be marked as recursive")
|
||||
})
|
||||
|
||||
// Test 3: Verify actiondefinition relation exists for mastertaskitem
|
||||
t.Run("DEFRelationExists", func(t *testing.T) {
|
||||
var defPreload *common.PreloadOption
|
||||
for i := range options.Preload {
|
||||
preload := &options.Preload[i]
|
||||
if preload.Relation == "MTL.MAL.DEF" {
|
||||
defPreload = preload
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
require.NotNil(t, defPreload, "Expected to find actiondefinition preload for mastertaskitem")
|
||||
assert.Equal(t, "rid_actiondefinition", defPreload.ForeignKey,
|
||||
"actiondefinition preload should have ForeignKey set")
|
||||
})
|
||||
|
||||
// Test 4: Verify relation name generation with mock query
|
||||
t.Run("RelationNameGeneration", func(t *testing.T) {
|
||||
// Find the mastertaskitem preload - it should be marked as recursive
|
||||
var recursivePreload common.PreloadOption
|
||||
found := false
|
||||
for _, preload := range options.Preload {
|
||||
if preload.Relation == "MTL.MAL" && preload.Recursive {
|
||||
recursivePreload = preload
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
require.True(t, found, "Expected to find recursive mastertaskitem preload MTL.MAL")
|
||||
|
||||
// Create mock query to track operations
|
||||
mockQuery := &mockSelectQuery{operations: []string{}}
|
||||
|
||||
// Apply the recursive preload
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, recursivePreload, options.Preload, nil, 0)
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
// Verify the correct FK-based relation name was generated
|
||||
foundCorrectRelation := false
|
||||
|
||||
for _, op := range mock.operations {
|
||||
// Should generate: MTL.MAL.MAL_RID_PARENTMASTERTASKITEM
|
||||
if op == "PreloadRelation:MTL.MAL.MAL_RID_PARENTMASTERTASKITEM" {
|
||||
foundCorrectRelation = true
|
||||
}
|
||||
}
|
||||
|
||||
assert.True(t, foundCorrectRelation,
|
||||
"Expected FK-based relation name 'MTL.MAL.MAL_RID_PARENTMASTERTASKITEM' to be generated. Operations: %v",
|
||||
mock.operations)
|
||||
})
|
||||
|
||||
// Test 5: Verify WHERE clause is cleared for recursive levels
|
||||
t.Run("WhereClauseClearedForChildren", func(t *testing.T) {
|
||||
// Find the mastertaskitem preload - it should be marked as recursive
|
||||
var recursivePreload common.PreloadOption
|
||||
found := false
|
||||
for _, preload := range options.Preload {
|
||||
if preload.Relation == "MTL.MAL" && preload.Recursive {
|
||||
recursivePreload = preload
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
require.True(t, found, "Expected to find recursive mastertaskitem preload MTL.MAL")
|
||||
|
||||
// The root level has a WHERE clause (rid_parentmastertaskitem is null)
|
||||
// But when we apply recursion, it should be cleared
|
||||
assert.NotEmpty(t, recursivePreload.Where, "Root preload should have WHERE clause")
|
||||
|
||||
mockQuery := &mockSelectQuery{operations: []string{}}
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, recursivePreload, options.Preload, nil, 0)
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
// After the first level, WHERE clauses should not be reapplied
|
||||
// We check that the recursive relation was created (which means WHERE was cleared internally)
|
||||
foundRecursiveRelation := false
|
||||
for _, op := range mock.operations {
|
||||
if op == "PreloadRelation:MTL.MAL.MAL_RID_PARENTMASTERTASKITEM" {
|
||||
foundRecursiveRelation = true
|
||||
}
|
||||
}
|
||||
|
||||
assert.True(t, foundRecursiveRelation,
|
||||
"Recursive relation should be created (WHERE clause should be cleared internally)")
|
||||
})
|
||||
|
||||
// Test 6: Verify child relations are extended to recursive levels
|
||||
t.Run("ChildRelationsExtended", func(t *testing.T) {
|
||||
// Find the mastertaskitem preload - it should be marked as recursive
|
||||
var recursivePreload common.PreloadOption
|
||||
foundRecursive := false
|
||||
|
||||
for _, preload := range options.Preload {
|
||||
if preload.Relation == "MTL.MAL" && preload.Recursive {
|
||||
recursivePreload = preload
|
||||
foundRecursive = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
require.True(t, foundRecursive, "Expected to find recursive mastertaskitem preload MTL.MAL")
|
||||
|
||||
mockQuery := &mockSelectQuery{operations: []string{}}
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, recursivePreload, options.Preload, nil, 0)
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
// actiondefinition should be extended to the recursive level
|
||||
// Expected: MTL.MAL.MAL_RID_PARENTMASTERTASKITEM.DEF
|
||||
foundExtendedDEF := false
|
||||
for _, op := range mock.operations {
|
||||
if op == "PreloadRelation:MTL.MAL.MAL_RID_PARENTMASTERTASKITEM.DEF" {
|
||||
foundExtendedDEF = true
|
||||
}
|
||||
}
|
||||
|
||||
assert.True(t, foundExtendedDEF,
|
||||
"Expected actiondefinition relation to be extended to recursive level. Operations: %v",
|
||||
mock.operations)
|
||||
})
|
||||
}
|
||||
|
||||
// TestXFilesRecursivePreloadDepth tests that recursive preloads respect the depth limit of 8
|
||||
func TestXFilesRecursivePreloadDepth(t *testing.T) {
|
||||
handler := &Handler{}
|
||||
|
||||
preload := common.PreloadOption{
|
||||
Relation: "MAL",
|
||||
Recursive: true,
|
||||
RelatedKey: "rid_parentmastertaskitem",
|
||||
}
|
||||
|
||||
allPreloads := []common.PreloadOption{preload}
|
||||
|
||||
t.Run("Depth7CreatesLevel8", func(t *testing.T) {
|
||||
mockQuery := &mockSelectQuery{operations: []string{}}
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 7)
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
foundDepth8 := false
|
||||
for _, op := range mock.operations {
|
||||
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
|
||||
foundDepth8 = true
|
||||
}
|
||||
}
|
||||
|
||||
assert.True(t, foundDepth8, "Should create level 8 when starting at depth 7")
|
||||
})
|
||||
|
||||
t.Run("Depth8DoesNotCreateLevel9", func(t *testing.T) {
|
||||
mockQuery := &mockSelectQuery{operations: []string{}}
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 8)
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
foundDepth9 := false
|
||||
for _, op := range mock.operations {
|
||||
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
|
||||
foundDepth9 = true
|
||||
}
|
||||
}
|
||||
|
||||
assert.False(t, foundDepth9, "Should NOT create level 9 (depth limit is 8)")
|
||||
})
|
||||
}
|
||||
|
||||
// TestXFilesResponseStructure validates the actual structure of the response
|
||||
// This test can be expanded when we have a full database integration test environment
|
||||
func TestXFilesResponseStructure(t *testing.T) {
|
||||
// Load the expected correct response
|
||||
correctResponsePath := filepath.Join("..", "..", "tests", "data", "xfiles.response.correct.json")
|
||||
correctData, err := os.ReadFile(correctResponsePath)
|
||||
require.NoError(t, err, "Failed to read xfiles.response.correct.json")
|
||||
|
||||
var correctResponse []map[string]interface{}
|
||||
err = json.Unmarshal(correctData, &correctResponse)
|
||||
require.NoError(t, err, "Failed to parse xfiles.response.correct.json")
|
||||
|
||||
// Test 1: Verify root level has exactly 1 masterprocess
|
||||
t.Run("RootLevelHasOneItem", func(t *testing.T) {
|
||||
assert.Len(t, correctResponse, 1, "Root level should have exactly 1 masterprocess record")
|
||||
})
|
||||
|
||||
// Test 2: Verify the root item has MTL relation
|
||||
t.Run("RootHasMTLRelation", func(t *testing.T) {
|
||||
require.NotEmpty(t, correctResponse, "Response should not be empty")
|
||||
rootItem := correctResponse[0]
|
||||
|
||||
mtl, exists := rootItem["MTL"]
|
||||
assert.True(t, exists, "Root item should have MTL relation")
|
||||
assert.NotNil(t, mtl, "MTL relation should not be null")
|
||||
})
|
||||
|
||||
// Test 3: Verify MTL has MAL items
|
||||
t.Run("MTLHasMALItems", func(t *testing.T) {
|
||||
require.NotEmpty(t, correctResponse, "Response should not be empty")
|
||||
rootItem := correctResponse[0]
|
||||
|
||||
mtl, ok := rootItem["MTL"].([]interface{})
|
||||
require.True(t, ok, "MTL should be an array")
|
||||
require.NotEmpty(t, mtl, "MTL should have items")
|
||||
|
||||
firstMTL, ok := mtl[0].(map[string]interface{})
|
||||
require.True(t, ok, "MTL item should be a map")
|
||||
|
||||
mal, exists := firstMTL["MAL"]
|
||||
assert.True(t, exists, "MTL item should have MAL relation")
|
||||
assert.NotNil(t, mal, "MAL relation should not be null")
|
||||
})
|
||||
|
||||
// Test 4: Verify MAL items have MAL_RID_PARENTMASTERTASKITEM relation (recursive)
|
||||
t.Run("MALHasRecursiveRelation", func(t *testing.T) {
|
||||
require.NotEmpty(t, correctResponse, "Response should not be empty")
|
||||
rootItem := correctResponse[0]
|
||||
|
||||
mtl, ok := rootItem["MTL"].([]interface{})
|
||||
require.True(t, ok, "MTL should be an array")
|
||||
require.NotEmpty(t, mtl, "MTL should have items")
|
||||
|
||||
firstMTL, ok := mtl[0].(map[string]interface{})
|
||||
require.True(t, ok, "MTL item should be a map")
|
||||
|
||||
mal, ok := firstMTL["MAL"].([]interface{})
|
||||
require.True(t, ok, "MAL should be an array")
|
||||
require.NotEmpty(t, mal, "MAL should have items")
|
||||
|
||||
firstMAL, ok := mal[0].(map[string]interface{})
|
||||
require.True(t, ok, "MAL item should be a map")
|
||||
|
||||
// The key assertion: check for FK-based relation name
|
||||
recursiveRelation, exists := firstMAL["MAL_RID_PARENTMASTERTASKITEM"]
|
||||
assert.True(t, exists,
|
||||
"MAL item should have MAL_RID_PARENTMASTERTASKITEM relation (FK-based name)")
|
||||
|
||||
// It can be null or an array, depending on whether this item has children
|
||||
if recursiveRelation != nil {
|
||||
_, isArray := recursiveRelation.([]interface{})
|
||||
assert.True(t, isArray,
|
||||
"MAL_RID_PARENTMASTERTASKITEM should be an array when not null")
|
||||
}
|
||||
})
|
||||
|
||||
// Test 5: Verify "Receive COB Document for" appears as a child, not at root
|
||||
t.Run("ChildItemsAreNested", func(t *testing.T) {
|
||||
// This test verifies that "Receive COB Document for" doesn't appear
|
||||
// multiple times at the wrong level, but is properly nested
|
||||
|
||||
// Count how many times we find this description at the MAL level (should be 0 or 1)
|
||||
require.NotEmpty(t, correctResponse, "Response should not be empty")
|
||||
rootItem := correctResponse[0]
|
||||
|
||||
mtl, ok := rootItem["MTL"].([]interface{})
|
||||
require.True(t, ok, "MTL should be an array")
|
||||
require.NotEmpty(t, mtl, "MTL should have items")
|
||||
|
||||
firstMTL, ok := mtl[0].(map[string]interface{})
|
||||
require.True(t, ok, "MTL item should be a map")
|
||||
|
||||
mal, ok := firstMTL["MAL"].([]interface{})
|
||||
require.True(t, ok, "MAL should be an array")
|
||||
|
||||
// Count root-level MAL items (before the fix, there were 12; should be 1)
|
||||
assert.Len(t, mal, 1,
|
||||
"MAL should have exactly 1 root-level item (before fix: 12 duplicates)")
|
||||
|
||||
// Verify the root item has a description
|
||||
firstMAL, ok := mal[0].(map[string]interface{})
|
||||
require.True(t, ok, "MAL item should be a map")
|
||||
|
||||
description, exists := firstMAL["description"]
|
||||
assert.True(t, exists, "MAL item should have a description")
|
||||
assert.Equal(t, "Capture COB Information", description,
|
||||
"Root MAL item should be 'Capture COB Information'")
|
||||
})
|
||||
|
||||
// Test 6: Verify DEF relation exists at MAL level
|
||||
t.Run("DEFRelationExists", func(t *testing.T) {
|
||||
require.NotEmpty(t, correctResponse, "Response should not be empty")
|
||||
rootItem := correctResponse[0]
|
||||
|
||||
mtl, ok := rootItem["MTL"].([]interface{})
|
||||
require.True(t, ok, "MTL should be an array")
|
||||
require.NotEmpty(t, mtl, "MTL should have items")
|
||||
|
||||
firstMTL, ok := mtl[0].(map[string]interface{})
|
||||
require.True(t, ok, "MTL item should be a map")
|
||||
|
||||
mal, ok := firstMTL["MAL"].([]interface{})
|
||||
require.True(t, ok, "MAL should be an array")
|
||||
require.NotEmpty(t, mal, "MAL should have items")
|
||||
|
||||
firstMAL, ok := mal[0].(map[string]interface{})
|
||||
require.True(t, ok, "MAL item should be a map")
|
||||
|
||||
// Verify DEF relation exists (child relation extension)
|
||||
def, exists := firstMAL["DEF"]
|
||||
assert.True(t, exists, "MAL item should have DEF relation")
|
||||
|
||||
// DEF can be null or an object
|
||||
if def != nil {
|
||||
_, isMap := def.(map[string]interface{})
|
||||
assert.True(t, isMap, "DEF should be an object when not null")
|
||||
}
|
||||
})
|
||||
}
|
||||
153
pkg/security/KEYSTORE.md
Normal file
153
pkg/security/KEYSTORE.md
Normal file
@@ -0,0 +1,153 @@
|
||||
# Keystore
|
||||
|
||||
Per-user named auth keys with pluggable storage. Each user can hold multiple keys of different types — JWT secrets, header API keys, OAuth2 client credentials, or generic API keys. Keys are identified by a human-readable name ("CI deploy", "mobile app") and can carry scopes and arbitrary metadata.
|
||||
|
||||
## Key types
|
||||
|
||||
| Constant | Value | Use case |
|
||||
|---|---|---|
|
||||
| `KeyTypeJWTSecret` | `jwt_secret` | Per-user JWT signing secret |
|
||||
| `KeyTypeHeaderAPI` | `header_api` | Static API key sent in a request header |
|
||||
| `KeyTypeOAuth2` | `oauth2` | OAuth2 client credentials |
|
||||
| `KeyTypeGenericAPI` | `api` | General-purpose application key |
|
||||
|
||||
## Storage backends
|
||||
|
||||
### ConfigKeyStore
|
||||
|
||||
In-memory store seeded from a static list. Suitable for a small, fixed set of service-account keys loaded from a config file. Keys created at runtime via `CreateKey` are held in memory and lost on restart.
|
||||
|
||||
```go
|
||||
// Pre-load keys from config (KeyHash = SHA-256 hex of the raw key)
|
||||
store := security.NewConfigKeyStore([]security.UserKey{
|
||||
{
|
||||
UserID: 1,
|
||||
KeyType: security.KeyTypeGenericAPI,
|
||||
KeyHash: "e3b0c44298fc1c149afb...", // sha256(rawKey)
|
||||
Name: "CI deploy",
|
||||
Scopes: []string{"deploy"},
|
||||
IsActive: true,
|
||||
},
|
||||
})
|
||||
```
|
||||
|
||||
### DatabaseKeyStore
|
||||
|
||||
Backed by PostgreSQL stored procedures. Supports optional caching (default 2-minute TTL). Apply `keystore_schema.sql` before use.
|
||||
|
||||
```go
|
||||
db, _ := sql.Open("postgres", dsn)
|
||||
|
||||
store := security.NewDatabaseKeyStore(db)
|
||||
|
||||
// With options
|
||||
store = security.NewDatabaseKeyStore(db, security.DatabaseKeyStoreOptions{
|
||||
CacheTTL: 5 * time.Minute,
|
||||
SQLNames: &security.KeyStoreSQLNames{
|
||||
ValidateKey: "myapp_keystore_validate", // override one procedure name
|
||||
},
|
||||
})
|
||||
```
|
||||
|
||||
## Managing keys
|
||||
|
||||
```go
|
||||
ctx := context.Background()
|
||||
|
||||
// Create — raw key returned once; store it securely
|
||||
resp, err := store.CreateKey(ctx, security.CreateKeyRequest{
|
||||
UserID: 42,
|
||||
KeyType: security.KeyTypeGenericAPI,
|
||||
Name: "mobile app",
|
||||
Scopes: []string{"read", "write"},
|
||||
})
|
||||
fmt.Println(resp.RawKey) // only shown here; hashed internally
|
||||
|
||||
// List
|
||||
keys, err := store.GetUserKeys(ctx, 42, "") // "" = all types
|
||||
keys, err = store.GetUserKeys(ctx, 42, security.KeyTypeGenericAPI)
|
||||
|
||||
// Revoke
|
||||
err = store.DeleteKey(ctx, 42, resp.Key.ID)
|
||||
|
||||
// Validate (used by authenticators internally)
|
||||
key, err := store.ValidateKey(ctx, rawKey, "")
|
||||
```
|
||||
|
||||
## HTTP authentication
|
||||
|
||||
`KeyStoreAuthenticator` wraps any `KeyStore` and implements the `Authenticator` interface. It is drop-in compatible with `DatabaseAuthenticator` and works in `CompositeSecurityProvider`.
|
||||
|
||||
Keys are extracted from the request in this order:
|
||||
|
||||
1. `Authorization: Bearer <key>`
|
||||
2. `Authorization: ApiKey <key>`
|
||||
3. `X-API-Key: <key>`
|
||||
|
||||
```go
|
||||
auth := security.NewKeyStoreAuthenticator(store, "") // "" = accept any key type
|
||||
// Restrict to a specific type:
|
||||
auth = security.NewKeyStoreAuthenticator(store, security.KeyTypeGenericAPI)
|
||||
```
|
||||
|
||||
Plug it into a handler:
|
||||
|
||||
```go
|
||||
handler := resolvespec.NewHandler(db, registry,
|
||||
resolvespec.WithAuthenticator(auth),
|
||||
)
|
||||
```
|
||||
|
||||
`Login` and `Logout` return an error — key lifecycle is managed through `KeyStore` directly.
|
||||
|
||||
On successful validation the request context receives a `UserContext` where:
|
||||
|
||||
- `UserID` — from the key
|
||||
- `Roles` — the key's `Scopes`
|
||||
- `Claims["key_type"]` — key type string
|
||||
- `Claims["key_name"]` — key name
|
||||
|
||||
## Database setup
|
||||
|
||||
Apply `keystore_schema.sql` to your PostgreSQL database. It requires the `users` table from the main `database_schema.sql`.
|
||||
|
||||
```sql
|
||||
\i pkg/security/keystore_schema.sql
|
||||
```
|
||||
|
||||
This creates:
|
||||
|
||||
- `user_keys` table with indexes on `user_id`, `key_hash`, and `key_type`
|
||||
- `resolvespec_keystore_get_user_keys(p_user_id, p_key_type)`
|
||||
- `resolvespec_keystore_create_key(p_request jsonb)`
|
||||
- `resolvespec_keystore_delete_key(p_user_id, p_key_id)`
|
||||
- `resolvespec_keystore_validate_key(p_key_hash, p_key_type)`
|
||||
|
||||
### Custom procedure names
|
||||
|
||||
```go
|
||||
store := security.NewDatabaseKeyStore(db, security.DatabaseKeyStoreOptions{
|
||||
SQLNames: &security.KeyStoreSQLNames{
|
||||
GetUserKeys: "myschema_get_keys",
|
||||
CreateKey: "myschema_create_key",
|
||||
DeleteKey: "myschema_delete_key",
|
||||
ValidateKey: "myschema_validate_key",
|
||||
},
|
||||
})
|
||||
|
||||
// Validate names at startup
|
||||
names := &security.KeyStoreSQLNames{
|
||||
GetUserKeys: "myschema_get_keys",
|
||||
// ...
|
||||
}
|
||||
if err := security.ValidateKeyStoreSQLNames(names); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
||||
## Security notes
|
||||
|
||||
- Raw keys are never stored. Only the SHA-256 hex digest is persisted.
|
||||
- The raw key is generated with `crypto/rand` (32 bytes, base64url-encoded) and returned exactly once in `CreateKeyResponse.RawKey`.
|
||||
- Hash comparisons in `ConfigKeyStore` use `crypto/subtle.ConstantTimeCompare` to prevent timing side-channels.
|
||||
- `DeleteKey` performs a soft delete (`is_active = false`). The `DatabaseKeyStore` invalidates the cache entry immediately, but due to the cache TTL a revoked key may authenticate for up to `CacheTTL` (default 2 minutes) in a distributed environment. Set `CacheTTL: 0` to disable caching if immediate revocation is required.
|
||||
527
pkg/security/OAUTH2.md
Normal file
527
pkg/security/OAUTH2.md
Normal file
@@ -0,0 +1,527 @@
|
||||
# OAuth2 Authentication Guide
|
||||
|
||||
## Overview
|
||||
|
||||
The security package provides OAuth2 authentication support for any OAuth2-compliant provider including Google, GitHub, Microsoft, Facebook, and custom providers.
|
||||
|
||||
## Features
|
||||
|
||||
- **Universal OAuth2 Support**: Works with any OAuth2 provider
|
||||
- **Pre-configured Providers**: Google, GitHub, Microsoft, Facebook
|
||||
- **Multi-Provider Support**: Use all OAuth2 providers simultaneously
|
||||
- **Custom Providers**: Easy configuration for any OAuth2 service
|
||||
- **Session Management**: Database-backed session storage
|
||||
- **Token Refresh**: Automatic token refresh support
|
||||
- **State Validation**: Built-in CSRF protection
|
||||
- **User Auto-Creation**: Automatically creates users on first login
|
||||
- **Unified Authentication**: OAuth2 and traditional auth share same session storage
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Database Setup
|
||||
|
||||
```sql
|
||||
-- Run the schema from database_schema.sql
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
id SERIAL PRIMARY KEY,
|
||||
username VARCHAR(255) NOT NULL UNIQUE,
|
||||
email VARCHAR(255) NOT NULL UNIQUE,
|
||||
password VARCHAR(255),
|
||||
user_level INTEGER DEFAULT 0,
|
||||
roles VARCHAR(500),
|
||||
is_active BOOLEAN DEFAULT true,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
last_login_at TIMESTAMP,
|
||||
remote_id VARCHAR(255),
|
||||
auth_provider VARCHAR(50)
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS user_sessions (
|
||||
id SERIAL PRIMARY KEY,
|
||||
session_token VARCHAR(500) NOT NULL UNIQUE,
|
||||
user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
expires_at TIMESTAMP NOT NULL,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
last_activity_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
ip_address VARCHAR(45),
|
||||
user_agent TEXT,
|
||||
access_token TEXT,
|
||||
refresh_token TEXT,
|
||||
token_type VARCHAR(50) DEFAULT 'Bearer',
|
||||
auth_provider VARCHAR(50)
|
||||
);
|
||||
|
||||
-- OAuth2 stored procedures (7 functions)
|
||||
-- See database_schema.sql for full implementation
|
||||
```
|
||||
|
||||
### 2. Google OAuth2
|
||||
|
||||
```go
|
||||
import "github.com/bitechdev/ResolveSpec/pkg/security"
|
||||
|
||||
// Create authenticator
|
||||
oauth2Auth := security.NewGoogleAuthenticator(
|
||||
"your-google-client-id",
|
||||
"your-google-client-secret",
|
||||
"http://localhost:8080/auth/google/callback",
|
||||
db,
|
||||
)
|
||||
|
||||
// Login route - redirects to Google
|
||||
router.HandleFunc("/auth/google/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
state, _ := oauth2Auth.OAuth2GenerateState()
|
||||
authURL, _ := oauth2Auth.OAuth2GetAuthURL(state)
|
||||
http.Redirect(w, r, authURL, http.StatusTemporaryRedirect)
|
||||
})
|
||||
|
||||
// Callback route - handles Google response
|
||||
router.HandleFunc("/auth/google/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
code := r.URL.Query().Get("code")
|
||||
state := r.URL.Query().Get("state")
|
||||
|
||||
loginResp, err := oauth2Auth.OAuth2HandleCallback(r.Context(), code, state)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
// Set session cookie
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: "session_token",
|
||||
Value: loginResp.Token,
|
||||
Path: "/",
|
||||
MaxAge: int(loginResp.ExpiresIn),
|
||||
HttpOnly: true,
|
||||
Secure: true,
|
||||
})
|
||||
|
||||
http.Redirect(w, r, "/dashboard", http.StatusTemporaryRedirect)
|
||||
})
|
||||
```
|
||||
|
||||
### 3. GitHub OAuth2
|
||||
|
||||
```go
|
||||
oauth2Auth := security.NewGitHubAuthenticator(
|
||||
"your-github-client-id",
|
||||
"your-github-client-secret",
|
||||
"http://localhost:8080/auth/github/callback",
|
||||
db,
|
||||
)
|
||||
|
||||
// Same routes pattern as Google
|
||||
router.HandleFunc("/auth/github/login", ...)
|
||||
router.HandleFunc("/auth/github/callback", ...)
|
||||
```
|
||||
|
||||
### 4. Microsoft OAuth2
|
||||
|
||||
```go
|
||||
oauth2Auth := security.NewMicrosoftAuthenticator(
|
||||
"your-microsoft-client-id",
|
||||
"your-microsoft-client-secret",
|
||||
"http://localhost:8080/auth/microsoft/callback",
|
||||
db,
|
||||
)
|
||||
```
|
||||
|
||||
### 5. Facebook OAuth2
|
||||
|
||||
```go
|
||||
oauth2Auth := security.NewFacebookAuthenticator(
|
||||
"your-facebook-client-id",
|
||||
"your-facebook-client-secret",
|
||||
"http://localhost:8080/auth/facebook/callback",
|
||||
db,
|
||||
)
|
||||
```
|
||||
|
||||
## Custom OAuth2 Provider
|
||||
|
||||
```go
|
||||
oauth2Auth := security.NewDatabaseAuthenticator(db).WithOAuth2(security.OAuth2Config{
|
||||
ClientID: "your-client-id",
|
||||
ClientSecret: "your-client-secret",
|
||||
RedirectURL: "http://localhost:8080/auth/callback",
|
||||
Scopes: []string{"openid", "profile", "email"},
|
||||
AuthURL: "https://your-provider.com/oauth/authorize",
|
||||
TokenURL: "https://your-provider.com/oauth/token",
|
||||
UserInfoURL: "https://your-provider.com/oauth/userinfo",
|
||||
DB: db,
|
||||
ProviderName: "custom",
|
||||
|
||||
// Optional: Custom user info parser
|
||||
UserInfoParser: func(userInfo map[string]any) (*security.UserContext, error) {
|
||||
return &security.UserContext{
|
||||
UserName: userInfo["username"].(string),
|
||||
Email: userInfo["email"].(string),
|
||||
RemoteID: userInfo["id"].(string),
|
||||
UserLevel: 1,
|
||||
Roles: []string{"user"},
|
||||
Claims: userInfo,
|
||||
}, nil
|
||||
},
|
||||
})
|
||||
```
|
||||
|
||||
## Protected Routes
|
||||
|
||||
```go
|
||||
// Create security provider
|
||||
colSec := security.NewDatabaseColumnSecurityProvider(db)
|
||||
rowSec := security.NewDatabaseRowSecurityProvider(db)
|
||||
provider, _ := security.NewCompositeSecurityProvider(oauth2Auth, colSec, rowSec)
|
||||
securityList, _ := security.NewSecurityList(provider)
|
||||
|
||||
// Apply middleware to protected routes
|
||||
protectedRouter := router.PathPrefix("/api").Subrouter()
|
||||
protectedRouter.Use(security.NewAuthMiddleware(securityList))
|
||||
protectedRouter.Use(security.SetSecurityMiddleware(securityList))
|
||||
|
||||
protectedRouter.HandleFunc("/profile", func(w http.ResponseWriter, r *http.Request) {
|
||||
userCtx, _ := security.GetUserContext(r.Context())
|
||||
json.NewEncoder(w).Encode(userCtx)
|
||||
})
|
||||
```
|
||||
|
||||
## Token Refresh
|
||||
|
||||
OAuth2 access tokens expire after a period of time. Use the refresh token to obtain a new access token without requiring the user to log in again.
|
||||
|
||||
```go
|
||||
router.HandleFunc("/auth/refresh", func(w http.ResponseWriter, r *http.Request) {
|
||||
var req struct {
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
Provider string `json:"provider"` // "google", "github", etc.
|
||||
}
|
||||
json.NewDecoder(r.Body).Decode(&req)
|
||||
|
||||
// Default to google if not specified
|
||||
if req.Provider == "" {
|
||||
req.Provider = "google"
|
||||
}
|
||||
|
||||
// Use OAuth2-specific refresh method
|
||||
loginResp, err := oauth2Auth.OAuth2RefreshToken(r.Context(), req.RefreshToken, req.Provider)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
// Set new session cookie
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: "session_token",
|
||||
Value: loginResp.Token,
|
||||
Path: "/",
|
||||
MaxAge: int(loginResp.ExpiresIn),
|
||||
HttpOnly: true,
|
||||
Secure: true,
|
||||
})
|
||||
|
||||
json.NewEncoder(w).Encode(loginResp)
|
||||
})
|
||||
```
|
||||
|
||||
**Important Notes:**
|
||||
- The refresh token is returned in the `LoginResponse.RefreshToken` field after successful OAuth2 callback
|
||||
- Store the refresh token securely on the client side
|
||||
- Each provider must be configured with the appropriate scopes to receive a refresh token (e.g., `access_type=offline` for Google)
|
||||
- The `OAuth2RefreshToken` method requires the provider name to identify which OAuth2 provider to use for refreshing
|
||||
|
||||
## Logout
|
||||
|
||||
```go
|
||||
router.HandleFunc("/auth/logout", func(w http.ResponseWriter, r *http.Request) {
|
||||
userCtx, _ := security.GetUserContext(r.Context())
|
||||
|
||||
oauth2Auth.Logout(r.Context(), security.LogoutRequest{
|
||||
Token: userCtx.SessionID,
|
||||
UserID: userCtx.UserID,
|
||||
})
|
||||
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: "session_token",
|
||||
Value: "",
|
||||
MaxAge: -1,
|
||||
})
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
})
|
||||
```
|
||||
|
||||
## Multi-Provider Setup
|
||||
|
||||
```go
|
||||
// Single DatabaseAuthenticator with ALL OAuth2 providers
|
||||
auth := security.NewDatabaseAuthenticator(db).
|
||||
WithOAuth2(security.OAuth2Config{
|
||||
ClientID: "google-client-id",
|
||||
ClientSecret: "google-client-secret",
|
||||
RedirectURL: "http://localhost:8080/auth/google/callback",
|
||||
Scopes: []string{"openid", "profile", "email"},
|
||||
AuthURL: "https://accounts.google.com/o/oauth2/auth",
|
||||
TokenURL: "https://oauth2.googleapis.com/token",
|
||||
UserInfoURL: "https://www.googleapis.com/oauth2/v2/userinfo",
|
||||
ProviderName: "google",
|
||||
}).
|
||||
WithOAuth2(security.OAuth2Config{
|
||||
ClientID: "github-client-id",
|
||||
ClientSecret: "github-client-secret",
|
||||
RedirectURL: "http://localhost:8080/auth/github/callback",
|
||||
Scopes: []string{"user:email"},
|
||||
AuthURL: "https://github.com/login/oauth/authorize",
|
||||
TokenURL: "https://github.com/login/oauth/access_token",
|
||||
UserInfoURL: "https://api.github.com/user",
|
||||
ProviderName: "github",
|
||||
})
|
||||
|
||||
// Get list of configured providers
|
||||
providers := auth.OAuth2GetProviders() // ["google", "github"]
|
||||
|
||||
// Google routes
|
||||
router.HandleFunc("/auth/google/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
state, _ := auth.OAuth2GenerateState()
|
||||
authURL, _ := auth.OAuth2GetAuthURL("google", state)
|
||||
http.Redirect(w, r, authURL, http.StatusTemporaryRedirect)
|
||||
})
|
||||
|
||||
router.HandleFunc("/auth/google/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
loginResp, err := auth.OAuth2HandleCallback(r.Context(), "google",
|
||||
r.URL.Query().Get("code"), r.URL.Query().Get("state"))
|
||||
// ... handle response
|
||||
})
|
||||
|
||||
// GitHub routes
|
||||
router.HandleFunc("/auth/github/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
state, _ := auth.OAuth2GenerateState()
|
||||
authURL, _ := auth.OAuth2GetAuthURL("github", state)
|
||||
http.Redirect(w, r, authURL, http.StatusTemporaryRedirect)
|
||||
})
|
||||
|
||||
router.HandleFunc("/auth/github/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
loginResp, err := auth.OAuth2HandleCallback(r.Context(), "github",
|
||||
r.URL.Query().Get("code"), r.URL.Query().Get("state"))
|
||||
// ... handle response
|
||||
})
|
||||
|
||||
// Use same authenticator for protected routes - works for ALL providers
|
||||
provider, _ := security.NewCompositeSecurityProvider(auth, colSec, rowSec)
|
||||
securityList, _ := security.NewSecurityList(provider)
|
||||
```
|
||||
|
||||
## Configuration Options
|
||||
|
||||
### OAuth2Config Fields
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| ClientID | string | OAuth2 client ID from provider |
|
||||
| ClientSecret | string | OAuth2 client secret |
|
||||
| RedirectURL | string | Callback URL registered with provider |
|
||||
| Scopes | []string | OAuth2 scopes to request |
|
||||
| AuthURL | string | Provider's authorization endpoint |
|
||||
| TokenURL | string | Provider's token endpoint |
|
||||
| UserInfoURL | string | Provider's user info endpoint |
|
||||
| DB | *sql.DB | Database connection for sessions |
|
||||
| UserInfoParser | func | Custom parser for user info (optional) |
|
||||
| StateValidator | func | Custom state validator (optional) |
|
||||
| ProviderName | string | Provider name for logging (optional) |
|
||||
|
||||
## User Info Parsing
|
||||
|
||||
The default parser extracts these standard fields:
|
||||
- `sub` → RemoteID
|
||||
- `email` → Email, UserName
|
||||
- `name` → UserName
|
||||
- `login` → UserName (GitHub)
|
||||
|
||||
Custom parser example:
|
||||
|
||||
```go
|
||||
UserInfoParser: func(userInfo map[string]any) (*security.UserContext, error) {
|
||||
// Extract custom fields
|
||||
ctx := &security.UserContext{
|
||||
UserName: userInfo["preferred_username"].(string),
|
||||
Email: userInfo["email"].(string),
|
||||
RemoteID: userInfo["sub"].(string),
|
||||
UserLevel: 1,
|
||||
Roles: []string{"user"},
|
||||
Claims: userInfo, // Store all claims
|
||||
}
|
||||
|
||||
// Add custom roles based on provider data
|
||||
if groups, ok := userInfo["groups"].([]interface{}); ok {
|
||||
for _, g := range groups {
|
||||
ctx.Roles = append(ctx.Roles, g.(string))
|
||||
}
|
||||
}
|
||||
|
||||
return ctx, nil
|
||||
}
|
||||
```
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
1. **Always use HTTPS in production**
|
||||
```go
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Secure: true, // Only send over HTTPS
|
||||
HttpOnly: true, // Prevent XSS access
|
||||
SameSite: http.SameSiteLaxMode, // CSRF protection
|
||||
})
|
||||
```
|
||||
|
||||
2. **Store secrets securely**
|
||||
```go
|
||||
clientID := os.Getenv("GOOGLE_CLIENT_ID")
|
||||
clientSecret := os.Getenv("GOOGLE_CLIENT_SECRET")
|
||||
```
|
||||
|
||||
3. **Validate redirect URLs**
|
||||
- Only register trusted redirect URLs with OAuth2 providers
|
||||
- Never accept redirect URL from request parameters
|
||||
|
||||
5. **Session expiration**
|
||||
- OAuth2 sessions automatically expire based on token expiry
|
||||
- Clean up expired sessions periodically:
|
||||
```sql
|
||||
DELETE FROM user_sessions WHERE expires_at < NOW();
|
||||
```
|
||||
|
||||
4. **State parameter**
|
||||
- Automatically generated with cryptographic randomness
|
||||
- One-time use and expires after 10 minutes
|
||||
- Prevents CSRF attacks
|
||||
|
||||
## Implementation Details
|
||||
|
||||
All database operations use stored procedures for consistency and security:
|
||||
- `resolvespec_oauth_getorcreateuser` - Find or create OAuth2 user
|
||||
- `resolvespec_oauth_createsession` - Create OAuth2 session
|
||||
- `resolvespec_oauth_getsession` - Validate and retrieve session
|
||||
- `resolvespec_oauth_deletesession` - Logout/delete session
|
||||
- `resolvespec_oauth_getrefreshtoken` - Get session by refresh token
|
||||
- `resolvespec_oauth_updaterefreshtoken` - Update tokens after refresh
|
||||
- `resolvespec_oauth_getuser` - Get user data by ID
|
||||
|
||||
## Provider Setup Guides
|
||||
|
||||
### Google
|
||||
|
||||
1. Go to [Google Cloud Console](https://console.cloud.google.com/)
|
||||
2. Create a new project or select existing
|
||||
3. Enable Google+ API
|
||||
4. Create OAuth 2.0 credentials
|
||||
5. Add authorized redirect URI: `http://localhost:8080/auth/google/callback`
|
||||
6. Copy Client ID and Client Secret
|
||||
|
||||
### GitHub
|
||||
|
||||
1. Go to [GitHub Developer Settings](https://github.com/settings/developers)
|
||||
2. Click "New OAuth App"
|
||||
3. Set Homepage URL: `http://localhost:8080`
|
||||
4. Set Authorization callback URL: `http://localhost:8080/auth/github/callback`
|
||||
5. Copy Client ID and Client Secret
|
||||
|
||||
### Microsoft
|
||||
|
||||
1. Go to [Azure Portal](https://portal.azure.com/)
|
||||
2. Register new application in Azure AD
|
||||
3. Add redirect URI: `http://localhost:8080/auth/microsoft/callback`
|
||||
4. Create client secret
|
||||
5. Copy Application (client) ID and secret value
|
||||
|
||||
### Facebook
|
||||
|
||||
1. Go to [Facebook Developers](https://developers.facebook.com/)
|
||||
2. Create new app
|
||||
3. Add Facebook Login product
|
||||
4. Set Valid OAuth Redirect URIs: `http://localhost:8080/auth/facebook/callback`
|
||||
5. Copy App ID and App Secret
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "redirect_uri_mismatch" error
|
||||
- Ensure the redirect URL in code matches exactly with provider configuration
|
||||
- Include protocol (http/https), domain, port, and path
|
||||
|
||||
### "invalid_client" error
|
||||
- Verify Client ID and Client Secret are correct
|
||||
- Check if credentials are for the correct environment (dev/prod)
|
||||
|
||||
### "invalid_grant" error during token exchange
|
||||
- State parameter validation failed
|
||||
- Token might have expired
|
||||
- Check server time synchronization
|
||||
|
||||
### User not created after successful OAuth2 login
|
||||
- Check database constraints (username/email unique)
|
||||
- Verify UserInfoParser is extracting required fields
|
||||
- Check database logs for constraint violations
|
||||
|
||||
## Testing
|
||||
|
||||
```go
|
||||
func TestOAuth2Flow(t *testing.T) {
|
||||
// Mock database
|
||||
db, mock, _ := sqlmock.New()
|
||||
|
||||
oauth2Auth := security.NewGoogleAuthenticator(
|
||||
"test-client-id",
|
||||
"test-client-secret",
|
||||
"http://localhost/callback",
|
||||
db,
|
||||
)
|
||||
|
||||
// Test state generation
|
||||
state, err := oauth2Auth.GenerateState()
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, state)
|
||||
|
||||
// Test auth URL generation
|
||||
authURL := oauth2Auth.GetAuthURL(state)
|
||||
assert.Contains(t, authURL, "accounts.google.com")
|
||||
assert.Contains(t, authURL, state)
|
||||
}
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
### DatabaseAuthenticator with OAuth2
|
||||
|
||||
| Method | Description |
|
||||
|--------|-------------|
|
||||
| WithOAuth2(cfg) | Adds OAuth2 provider (can be called multiple times, returns *DatabaseAuthenticator) |
|
||||
| OAuth2GetAuthURL(provider, state) | Returns OAuth2 authorization URL for specified provider |
|
||||
| OAuth2GenerateState() | Generates random state for CSRF protection |
|
||||
| OAuth2HandleCallback(ctx, provider, code, state) | Exchanges code for token and creates session |
|
||||
| OAuth2RefreshToken(ctx, refreshToken, provider) | Refreshes expired access token using refresh token |
|
||||
| OAuth2GetProviders() | Returns list of configured OAuth2 provider names |
|
||||
| Login(ctx, req) | Standard username/password login |
|
||||
| Logout(ctx, req) | Invalidates session (works for both OAuth2 and regular sessions) |
|
||||
| Authenticate(r) | Validates session token from request (works for both OAuth2 and regular sessions) |
|
||||
|
||||
### Pre-configured Constructors
|
||||
|
||||
- `NewGoogleAuthenticator(clientID, secret, redirectURL, db)` - Single provider
|
||||
- `NewGitHubAuthenticator(clientID, secret, redirectURL, db)` - Single provider
|
||||
- `NewMicrosoftAuthenticator(clientID, secret, redirectURL, db)` - Single provider
|
||||
- `NewFacebookAuthenticator(clientID, secret, redirectURL, db)` - Single provider
|
||||
- `NewMultiProviderAuthenticator(db, configs)` - Multiple providers at once
|
||||
|
||||
All return `*DatabaseAuthenticator` with OAuth2 pre-configured.
|
||||
|
||||
For multiple providers, use `WithOAuth2()` multiple times or `NewMultiProviderAuthenticator()`.
|
||||
|
||||
## Examples
|
||||
|
||||
Complete working examples available in `oauth2_examples.go`:
|
||||
- Basic Google OAuth2
|
||||
- GitHub OAuth2
|
||||
- Custom provider
|
||||
- Multi-provider setup
|
||||
- Token refresh
|
||||
- Logout flow
|
||||
- Complete integration with security middleware
|
||||
281
pkg/security/OAUTH2_REFRESH_QUICK_REFERENCE.md
Normal file
281
pkg/security/OAUTH2_REFRESH_QUICK_REFERENCE.md
Normal file
@@ -0,0 +1,281 @@
|
||||
# OAuth2 Refresh Token - Quick Reference
|
||||
|
||||
## Quick Setup (3 Steps)
|
||||
|
||||
### 1. Initialize Authenticator
|
||||
```go
|
||||
auth := security.NewGoogleAuthenticator(
|
||||
"client-id",
|
||||
"client-secret",
|
||||
"http://localhost:8080/auth/google/callback",
|
||||
db,
|
||||
)
|
||||
```
|
||||
|
||||
### 2. OAuth2 Login Flow
|
||||
```go
|
||||
// Login - Redirect to Google
|
||||
router.HandleFunc("/auth/google/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
state, _ := auth.OAuth2GenerateState()
|
||||
authURL, _ := auth.OAuth2GetAuthURL("google", state)
|
||||
http.Redirect(w, r, authURL, http.StatusTemporaryRedirect)
|
||||
})
|
||||
|
||||
// Callback - Store tokens
|
||||
router.HandleFunc("/auth/google/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
loginResp, _ := auth.OAuth2HandleCallback(
|
||||
r.Context(),
|
||||
"google",
|
||||
r.URL.Query().Get("code"),
|
||||
r.URL.Query().Get("state"),
|
||||
)
|
||||
|
||||
// Save refresh_token on client
|
||||
// loginResp.RefreshToken - Store this securely!
|
||||
// loginResp.Token - Session token for API calls
|
||||
})
|
||||
```
|
||||
|
||||
### 3. Refresh Endpoint
|
||||
```go
|
||||
router.HandleFunc("/auth/refresh", func(w http.ResponseWriter, r *http.Request) {
|
||||
var req struct {
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
}
|
||||
json.NewDecoder(r.Body).Decode(&req)
|
||||
|
||||
// Refresh token
|
||||
loginResp, err := auth.OAuth2RefreshToken(r.Context(), req.RefreshToken, "google")
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 401)
|
||||
return
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(loginResp)
|
||||
})
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Multi-Provider Example
|
||||
|
||||
```go
|
||||
// Configure multiple providers
|
||||
auth := security.NewDatabaseAuthenticator(db).
|
||||
WithOAuth2(security.OAuth2Config{
|
||||
ProviderName: "google",
|
||||
ClientID: "google-client-id",
|
||||
ClientSecret: "google-secret",
|
||||
RedirectURL: "http://localhost:8080/auth/google/callback",
|
||||
Scopes: []string{"openid", "profile", "email"},
|
||||
AuthURL: "https://accounts.google.com/o/oauth2/auth",
|
||||
TokenURL: "https://oauth2.googleapis.com/token",
|
||||
UserInfoURL: "https://www.googleapis.com/oauth2/v2/userinfo",
|
||||
}).
|
||||
WithOAuth2(security.OAuth2Config{
|
||||
ProviderName: "github",
|
||||
ClientID: "github-client-id",
|
||||
ClientSecret: "github-secret",
|
||||
RedirectURL: "http://localhost:8080/auth/github/callback",
|
||||
Scopes: []string{"user:email"},
|
||||
AuthURL: "https://github.com/login/oauth/authorize",
|
||||
TokenURL: "https://github.com/login/oauth/access_token",
|
||||
UserInfoURL: "https://api.github.com/user",
|
||||
})
|
||||
|
||||
// Refresh with provider selection
|
||||
router.HandleFunc("/auth/refresh", func(w http.ResponseWriter, r *http.Request) {
|
||||
var req struct {
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
Provider string `json:"provider"` // "google" or "github"
|
||||
}
|
||||
json.NewDecoder(r.Body).Decode(&req)
|
||||
|
||||
loginResp, err := auth.OAuth2RefreshToken(r.Context(), req.RefreshToken, req.Provider)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 401)
|
||||
return
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(loginResp)
|
||||
})
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Client-Side JavaScript
|
||||
|
||||
```javascript
|
||||
// Automatic token refresh on 401
|
||||
async function apiCall(url) {
|
||||
let response = await fetch(url, {
|
||||
headers: {
|
||||
'Authorization': 'Bearer ' + localStorage.getItem('access_token')
|
||||
}
|
||||
});
|
||||
|
||||
// Token expired - refresh it
|
||||
if (response.status === 401) {
|
||||
await refreshToken();
|
||||
|
||||
// Retry request with new token
|
||||
response = await fetch(url, {
|
||||
headers: {
|
||||
'Authorization': 'Bearer ' + localStorage.getItem('access_token')
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return response.json();
|
||||
}
|
||||
|
||||
async function refreshToken() {
|
||||
const response = await fetch('/auth/refresh', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
refresh_token: localStorage.getItem('refresh_token'),
|
||||
provider: localStorage.getItem('provider')
|
||||
})
|
||||
});
|
||||
|
||||
if (response.ok) {
|
||||
const data = await response.json();
|
||||
localStorage.setItem('access_token', data.token);
|
||||
localStorage.setItem('refresh_token', data.refresh_token);
|
||||
} else {
|
||||
// Refresh failed - redirect to login
|
||||
window.location.href = '/login';
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## API Methods
|
||||
|
||||
| Method | Parameters | Returns |
|
||||
|--------|-----------|---------|
|
||||
| `OAuth2RefreshToken` | `ctx, refreshToken, provider` | `*LoginResponse, error` |
|
||||
| `OAuth2HandleCallback` | `ctx, provider, code, state` | `*LoginResponse, error` |
|
||||
| `OAuth2GetAuthURL` | `provider, state` | `string, error` |
|
||||
| `OAuth2GenerateState` | none | `string, error` |
|
||||
| `OAuth2GetProviders` | none | `[]string` |
|
||||
|
||||
---
|
||||
|
||||
## LoginResponse Structure
|
||||
|
||||
```go
|
||||
type LoginResponse struct {
|
||||
Token string // New session token for API calls
|
||||
RefreshToken string // Refresh token (store securely)
|
||||
User *UserContext // User information
|
||||
ExpiresIn int64 // Seconds until token expires
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Database Stored Procedures
|
||||
|
||||
- `resolvespec_oauth_getrefreshtoken(refresh_token)` - Get session by refresh token
|
||||
- `resolvespec_oauth_updaterefreshtoken(update_data)` - Update tokens after refresh
|
||||
- `resolvespec_oauth_getuser(user_id)` - Get user data
|
||||
|
||||
All procedures return: `{p_success bool, p_error text, p_data jsonb}`
|
||||
|
||||
---
|
||||
|
||||
## Common Errors
|
||||
|
||||
| Error | Cause | Solution |
|
||||
|-------|-------|----------|
|
||||
| `invalid or expired refresh token` | Token revoked/expired | Re-authenticate user |
|
||||
| `OAuth2 provider 'xxx' not found` | Provider not configured | Add with `WithOAuth2()` |
|
||||
| `failed to refresh token with provider` | Provider rejected request | Check credentials, re-auth user |
|
||||
|
||||
---
|
||||
|
||||
## Security Checklist
|
||||
|
||||
- [ ] Use HTTPS for all OAuth2 endpoints
|
||||
- [ ] Store refresh tokens securely (HttpOnly cookies or encrypted storage)
|
||||
- [ ] Set cookie flags: `HttpOnly`, `Secure`, `SameSite=Strict`
|
||||
- [ ] Implement rate limiting on refresh endpoint
|
||||
- [ ] Log refresh attempts for audit
|
||||
- [ ] Rotate tokens on refresh
|
||||
- [ ] Revoke old sessions after successful refresh
|
||||
|
||||
---
|
||||
|
||||
## Testing
|
||||
|
||||
```bash
|
||||
# 1. Login and get refresh token
|
||||
curl http://localhost:8080/auth/google/login
|
||||
# Follow OAuth2 flow, get refresh_token from callback response
|
||||
|
||||
# 2. Refresh token
|
||||
curl -X POST http://localhost:8080/auth/refresh \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"refresh_token":"ya29.xxx","provider":"google"}'
|
||||
|
||||
# 3. Use new token
|
||||
curl http://localhost:8080/api/protected \
|
||||
-H "Authorization: Bearer sess_abc123..."
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Pre-configured Providers
|
||||
|
||||
```go
|
||||
// Google
|
||||
auth := security.NewGoogleAuthenticator(clientID, secret, redirectURL, db)
|
||||
|
||||
// GitHub
|
||||
auth := security.NewGitHubAuthenticator(clientID, secret, redirectURL, db)
|
||||
|
||||
// Microsoft
|
||||
auth := security.NewMicrosoftAuthenticator(clientID, secret, redirectURL, db)
|
||||
|
||||
// Facebook
|
||||
auth := security.NewFacebookAuthenticator(clientID, secret, redirectURL, db)
|
||||
|
||||
// All providers at once
|
||||
auth := security.NewMultiProviderAuthenticator(db, map[string]security.OAuth2Config{
|
||||
"google": {...},
|
||||
"github": {...},
|
||||
})
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Provider-Specific Notes
|
||||
|
||||
### Google
|
||||
- Add `access_type=offline` to get refresh token
|
||||
- Add `prompt=consent` to force consent screen
|
||||
```go
|
||||
authURL += "&access_type=offline&prompt=consent"
|
||||
```
|
||||
|
||||
### GitHub
|
||||
- Refresh tokens not always provided
|
||||
- May need to request `offline_access` scope
|
||||
|
||||
### Microsoft
|
||||
- Use `offline_access` scope for refresh token
|
||||
|
||||
### Facebook
|
||||
- Tokens expire after 60 days by default
|
||||
- Check app settings for token expiration policy
|
||||
|
||||
---
|
||||
|
||||
## Complete Example
|
||||
|
||||
See `/pkg/security/oauth2_examples.go` line 250 for full working example.
|
||||
|
||||
For detailed documentation see `/pkg/security/OAUTH2_REFRESH_TOKEN_IMPLEMENTATION.md`.
|
||||
495
pkg/security/OAUTH2_REFRESH_TOKEN_IMPLEMENTATION.md
Normal file
495
pkg/security/OAUTH2_REFRESH_TOKEN_IMPLEMENTATION.md
Normal file
@@ -0,0 +1,495 @@
|
||||
# OAuth2 Refresh Token Implementation
|
||||
|
||||
## Overview
|
||||
|
||||
OAuth2 refresh token functionality is **fully implemented** in the ResolveSpec security package. This allows refreshing expired access tokens without requiring users to re-authenticate.
|
||||
|
||||
## Implementation Status: ✅ COMPLETE
|
||||
|
||||
### Components Implemented
|
||||
|
||||
1. **✅ Database Schema** - Tables and stored procedures
|
||||
2. **✅ Go Methods** - OAuth2RefreshToken implementation
|
||||
3. **✅ Thread Safety** - Mutex protection for provider map
|
||||
4. **✅ Examples** - Working code examples
|
||||
5. **✅ Documentation** - Complete API reference
|
||||
|
||||
---
|
||||
|
||||
## 1. Database Schema
|
||||
|
||||
### Tables Modified
|
||||
|
||||
```sql
|
||||
-- user_sessions table with OAuth2 token fields
|
||||
CREATE TABLE IF NOT EXISTS user_sessions (
|
||||
id SERIAL PRIMARY KEY,
|
||||
session_token VARCHAR(500) NOT NULL UNIQUE,
|
||||
user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
expires_at TIMESTAMP NOT NULL,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
last_activity_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
ip_address VARCHAR(45),
|
||||
user_agent TEXT,
|
||||
access_token TEXT, -- OAuth2 access token
|
||||
refresh_token TEXT, -- OAuth2 refresh token
|
||||
token_type VARCHAR(50), -- "Bearer", etc.
|
||||
auth_provider VARCHAR(50) -- "google", "github", etc.
|
||||
);
|
||||
```
|
||||
|
||||
### Stored Procedures
|
||||
|
||||
**`resolvespec_oauth_getrefreshtoken(p_refresh_token)`**
|
||||
- Gets OAuth2 session data by refresh token
|
||||
- Returns: `{user_id, access_token, token_type, expiry}`
|
||||
- Location: `database_schema.sql:714`
|
||||
|
||||
**`resolvespec_oauth_updaterefreshtoken(p_update_data)`**
|
||||
- Updates session with new tokens after refresh
|
||||
- Input: `{user_id, old_refresh_token, new_session_token, new_access_token, new_refresh_token, expires_at}`
|
||||
- Location: `database_schema.sql:752`
|
||||
|
||||
**`resolvespec_oauth_getuser(p_user_id)`**
|
||||
- Gets user data by ID for building UserContext
|
||||
- Location: `database_schema.sql:791`
|
||||
|
||||
---
|
||||
|
||||
## 2. Go Implementation
|
||||
|
||||
### Method Signature
|
||||
|
||||
```go
|
||||
func (a *DatabaseAuthenticator) OAuth2RefreshToken(
|
||||
ctx context.Context,
|
||||
refreshToken string,
|
||||
providerName string,
|
||||
) (*LoginResponse, error)
|
||||
```
|
||||
|
||||
**Location:** `pkg/security/oauth2_methods.go:375`
|
||||
|
||||
### Implementation Flow
|
||||
|
||||
```
|
||||
1. Validate provider exists
|
||||
├─ getOAuth2Provider(providerName) with RLock
|
||||
└─ Return error if provider not configured
|
||||
|
||||
2. Get session from database
|
||||
├─ Call resolvespec_oauth_getrefreshtoken(refreshToken)
|
||||
└─ Parse session data {user_id, access_token, token_type, expiry}
|
||||
|
||||
3. Refresh token with OAuth2 provider
|
||||
├─ Create oauth2.Token from stored data
|
||||
├─ Use provider.config.TokenSource(ctx, oldToken)
|
||||
└─ Call tokenSource.Token() to get new token
|
||||
|
||||
4. Generate new session token
|
||||
└─ Use OAuth2GenerateState() for secure random token
|
||||
|
||||
5. Update database
|
||||
├─ Call resolvespec_oauth_updaterefreshtoken()
|
||||
└─ Store new session_token, access_token, refresh_token
|
||||
|
||||
6. Get user data
|
||||
├─ Call resolvespec_oauth_getuser(user_id)
|
||||
└─ Build UserContext
|
||||
|
||||
7. Return LoginResponse
|
||||
└─ {Token, RefreshToken, User, ExpiresIn}
|
||||
```
|
||||
|
||||
### Thread Safety
|
||||
|
||||
**Mutex Protection:** All access to `oauth2Providers` map is protected with `sync.RWMutex`
|
||||
|
||||
```go
|
||||
type DatabaseAuthenticator struct {
|
||||
oauth2Providers map[string]*OAuth2Provider
|
||||
oauth2ProvidersMutex sync.RWMutex // Thread-safe access
|
||||
}
|
||||
|
||||
// Read operations use RLock
|
||||
func (a *DatabaseAuthenticator) getOAuth2Provider(name string) {
|
||||
a.oauth2ProvidersMutex.RLock()
|
||||
defer a.oauth2ProvidersMutex.RUnlock()
|
||||
// ... access map
|
||||
}
|
||||
|
||||
// Write operations use Lock
|
||||
func (a *DatabaseAuthenticator) WithOAuth2(cfg OAuth2Config) {
|
||||
a.oauth2ProvidersMutex.Lock()
|
||||
defer a.oauth2ProvidersMutex.Unlock()
|
||||
// ... modify map
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 3. Usage Examples
|
||||
|
||||
### Single Provider (Google)
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/security"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func main() {
|
||||
db, _ := sql.Open("postgres", "connection-string")
|
||||
|
||||
// Create Google OAuth2 authenticator
|
||||
auth := security.NewGoogleAuthenticator(
|
||||
"your-client-id",
|
||||
"your-client-secret",
|
||||
"http://localhost:8080/auth/google/callback",
|
||||
db,
|
||||
)
|
||||
|
||||
router := mux.NewRouter()
|
||||
|
||||
// Token refresh endpoint
|
||||
router.HandleFunc("/auth/refresh", func(w http.ResponseWriter, r *http.Request) {
|
||||
var req struct {
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
}
|
||||
json.NewDecoder(r.Body).Decode(&req)
|
||||
|
||||
// Refresh token (provider name defaults to "google")
|
||||
loginResp, err := auth.OAuth2RefreshToken(r.Context(), req.RefreshToken, "google")
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
// Set new session cookie
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: "session_token",
|
||||
Value: loginResp.Token,
|
||||
Path: "/",
|
||||
MaxAge: int(loginResp.ExpiresIn),
|
||||
HttpOnly: true,
|
||||
Secure: true,
|
||||
})
|
||||
|
||||
json.NewEncoder(w).Encode(loginResp)
|
||||
})
|
||||
|
||||
http.ListenAndServe(":8080", router)
|
||||
}
|
||||
```
|
||||
|
||||
### Multi-Provider Setup
|
||||
|
||||
```go
|
||||
// Single authenticator with multiple OAuth2 providers
|
||||
auth := security.NewDatabaseAuthenticator(db).
|
||||
WithOAuth2(security.OAuth2Config{
|
||||
ClientID: "google-client-id",
|
||||
ClientSecret: "google-client-secret",
|
||||
RedirectURL: "http://localhost:8080/auth/google/callback",
|
||||
Scopes: []string{"openid", "profile", "email"},
|
||||
AuthURL: "https://accounts.google.com/o/oauth2/auth",
|
||||
TokenURL: "https://oauth2.googleapis.com/token",
|
||||
UserInfoURL: "https://www.googleapis.com/oauth2/v2/userinfo",
|
||||
ProviderName: "google",
|
||||
}).
|
||||
WithOAuth2(security.OAuth2Config{
|
||||
ClientID: "github-client-id",
|
||||
ClientSecret: "github-client-secret",
|
||||
RedirectURL: "http://localhost:8080/auth/github/callback",
|
||||
Scopes: []string{"user:email"},
|
||||
AuthURL: "https://github.com/login/oauth/authorize",
|
||||
TokenURL: "https://github.com/login/oauth/access_token",
|
||||
UserInfoURL: "https://api.github.com/user",
|
||||
ProviderName: "github",
|
||||
})
|
||||
|
||||
// Refresh endpoint with provider selection
|
||||
router.HandleFunc("/auth/refresh", func(w http.ResponseWriter, r *http.Request) {
|
||||
var req struct {
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
Provider string `json:"provider"` // "google" or "github"
|
||||
}
|
||||
json.NewDecoder(r.Body).Decode(&req)
|
||||
|
||||
// Refresh with specific provider
|
||||
loginResp, err := auth.OAuth2RefreshToken(r.Context(), req.RefreshToken, req.Provider)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(loginResp)
|
||||
})
|
||||
```
|
||||
|
||||
### Client-Side Usage
|
||||
|
||||
```javascript
|
||||
// JavaScript client example
|
||||
async function refreshAccessToken() {
|
||||
const refreshToken = localStorage.getItem('refresh_token');
|
||||
const provider = localStorage.getItem('auth_provider'); // "google", "github", etc.
|
||||
|
||||
const response = await fetch('/auth/refresh', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
refresh_token: refreshToken,
|
||||
provider: provider
|
||||
})
|
||||
});
|
||||
|
||||
if (response.ok) {
|
||||
const data = await response.json();
|
||||
|
||||
// Store new tokens
|
||||
localStorage.setItem('access_token', data.token);
|
||||
localStorage.setItem('refresh_token', data.refresh_token);
|
||||
|
||||
console.log('Token refreshed successfully');
|
||||
return data.token;
|
||||
} else {
|
||||
// Refresh failed - redirect to login
|
||||
window.location.href = '/login';
|
||||
}
|
||||
}
|
||||
|
||||
// Automatically refresh token when API returns 401
|
||||
async function apiCall(endpoint) {
|
||||
let response = await fetch(endpoint, {
|
||||
headers: {
|
||||
'Authorization': 'Bearer ' + localStorage.getItem('access_token')
|
||||
}
|
||||
});
|
||||
|
||||
if (response.status === 401) {
|
||||
// Token expired - try refresh
|
||||
const newToken = await refreshAccessToken();
|
||||
|
||||
// Retry with new token
|
||||
response = await fetch(endpoint, {
|
||||
headers: {
|
||||
'Authorization': 'Bearer ' + newToken
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return response.json();
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 4. API Reference
|
||||
|
||||
### DatabaseAuthenticator Methods
|
||||
|
||||
| Method | Signature | Description |
|
||||
|--------|-----------|-------------|
|
||||
| `OAuth2RefreshToken` | `(ctx, refreshToken, provider) (*LoginResponse, error)` | Refreshes expired OAuth2 access token |
|
||||
| `WithOAuth2` | `(cfg OAuth2Config) *DatabaseAuthenticator` | Adds OAuth2 provider (chainable) |
|
||||
| `OAuth2GetAuthURL` | `(provider, state) (string, error)` | Gets authorization URL |
|
||||
| `OAuth2HandleCallback` | `(ctx, provider, code, state) (*LoginResponse, error)` | Handles OAuth2 callback |
|
||||
| `OAuth2GenerateState` | `() (string, error)` | Generates CSRF state token |
|
||||
| `OAuth2GetProviders` | `() []string` | Lists configured providers |
|
||||
|
||||
### LoginResponse Structure
|
||||
|
||||
```go
|
||||
type LoginResponse struct {
|
||||
Token string // New session token
|
||||
RefreshToken string // New refresh token (may be same as input)
|
||||
User *UserContext // User information
|
||||
ExpiresIn int64 // Seconds until expiration
|
||||
}
|
||||
|
||||
type UserContext struct {
|
||||
UserID int // Database user ID
|
||||
UserName string // Username
|
||||
Email string // Email address
|
||||
UserLevel int // Permission level
|
||||
SessionID string // Session token
|
||||
RemoteID string // OAuth2 provider user ID
|
||||
Roles []string // User roles
|
||||
Claims map[string]any // Additional claims
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5. Important Notes
|
||||
|
||||
### Provider Configuration
|
||||
|
||||
**For Google:** Add `access_type=offline` to get refresh token on first login:
|
||||
|
||||
```go
|
||||
auth := security.NewGoogleAuthenticator(clientID, clientSecret, redirectURL, db)
|
||||
// When generating auth URL, add access_type parameter
|
||||
authURL, _ := auth.OAuth2GetAuthURL("google", state)
|
||||
authURL += "&access_type=offline&prompt=consent"
|
||||
```
|
||||
|
||||
**For GitHub:** Refresh tokens are not always provided. Check provider documentation.
|
||||
|
||||
### Token Storage
|
||||
|
||||
- Store refresh tokens securely on client (localStorage, secure cookie, etc.)
|
||||
- Never log refresh tokens
|
||||
- Refresh tokens are long-lived (days/months depending on provider)
|
||||
- Access tokens are short-lived (minutes/hours)
|
||||
|
||||
### Error Handling
|
||||
|
||||
Common errors:
|
||||
- `"invalid or expired refresh token"` - Token expired or revoked
|
||||
- `"OAuth2 provider 'xxx' not found"` - Provider not configured
|
||||
- `"failed to refresh token with provider"` - Provider rejected refresh request
|
||||
|
||||
### Security Best Practices
|
||||
|
||||
1. **Always use HTTPS** for token transmission
|
||||
2. **Store refresh tokens securely** on client
|
||||
3. **Set appropriate cookie flags**: `HttpOnly`, `Secure`, `SameSite`
|
||||
4. **Implement token rotation** - issue new refresh token on each refresh
|
||||
5. **Revoke old tokens** after successful refresh
|
||||
6. **Rate limit** refresh endpoints
|
||||
7. **Log refresh attempts** for audit trail
|
||||
|
||||
---
|
||||
|
||||
## 6. Testing
|
||||
|
||||
### Manual Test Flow
|
||||
|
||||
1. **Initial Login:**
|
||||
```bash
|
||||
curl http://localhost:8080/auth/google/login
|
||||
# Follow redirect to Google
|
||||
# Returns to callback with LoginResponse containing refresh_token
|
||||
```
|
||||
|
||||
2. **Wait for Token Expiry (or manually expire in DB)**
|
||||
|
||||
3. **Refresh Token:**
|
||||
```bash
|
||||
curl -X POST http://localhost:8080/auth/refresh \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"refresh_token": "ya29.a0AfH6SMB...",
|
||||
"provider": "google"
|
||||
}'
|
||||
|
||||
# Response:
|
||||
{
|
||||
"token": "sess_abc123...",
|
||||
"refresh_token": "ya29.a0AfH6SMB...",
|
||||
"user": {
|
||||
"user_id": 1,
|
||||
"user_name": "john_doe",
|
||||
"email": "john@example.com",
|
||||
"session_id": "sess_abc123..."
|
||||
},
|
||||
"expires_in": 3600
|
||||
}
|
||||
```
|
||||
|
||||
4. **Use New Token:**
|
||||
```bash
|
||||
curl http://localhost:8080/api/protected \
|
||||
-H "Authorization: Bearer sess_abc123..."
|
||||
```
|
||||
|
||||
### Database Verification
|
||||
|
||||
```sql
|
||||
-- Check session with refresh token
|
||||
SELECT session_token, user_id, expires_at, refresh_token, auth_provider
|
||||
FROM user_sessions
|
||||
WHERE refresh_token = 'ya29.a0AfH6SMB...';
|
||||
|
||||
-- Verify token was updated after refresh
|
||||
SELECT session_token, access_token, refresh_token,
|
||||
expires_at, last_activity_at
|
||||
FROM user_sessions
|
||||
WHERE user_id = 1
|
||||
ORDER BY created_at DESC
|
||||
LIMIT 1;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 7. Troubleshooting
|
||||
|
||||
### "Refresh token not found or expired"
|
||||
|
||||
**Cause:** Refresh token doesn't exist in database or session expired
|
||||
|
||||
**Solution:**
|
||||
- Check if initial OAuth2 login stored refresh token
|
||||
- Verify provider returns refresh token (some require `access_type=offline`)
|
||||
- Check session hasn't been deleted from database
|
||||
|
||||
### "Failed to refresh token with provider"
|
||||
|
||||
**Cause:** OAuth2 provider rejected the refresh request
|
||||
|
||||
**Possible reasons:**
|
||||
- Refresh token was revoked by user
|
||||
- OAuth2 app credentials changed
|
||||
- Network connectivity issues
|
||||
- Provider rate limiting
|
||||
|
||||
**Solution:**
|
||||
- Re-authenticate user (full OAuth2 flow)
|
||||
- Check provider dashboard for app status
|
||||
- Verify client credentials are correct
|
||||
|
||||
### "OAuth2 provider 'xxx' not found"
|
||||
|
||||
**Cause:** Provider not registered with `WithOAuth2()`
|
||||
|
||||
**Solution:**
|
||||
```go
|
||||
// Make sure provider is configured
|
||||
auth := security.NewDatabaseAuthenticator(db).
|
||||
WithOAuth2(security.OAuth2Config{
|
||||
ProviderName: "google", // This name must match refresh call
|
||||
// ... other config
|
||||
})
|
||||
|
||||
// Then use same name in refresh
|
||||
auth.OAuth2RefreshToken(ctx, token, "google") // Must match ProviderName
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 8. Complete Working Example
|
||||
|
||||
See `pkg/security/oauth2_examples.go:250` for full working example with token refresh.
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
OAuth2 refresh token functionality is **production-ready** with:
|
||||
|
||||
- ✅ Complete database schema with stored procedures
|
||||
- ✅ Thread-safe Go implementation with mutex protection
|
||||
- ✅ Multi-provider support (Google, GitHub, Microsoft, Facebook, custom)
|
||||
- ✅ Comprehensive error handling
|
||||
- ✅ Working code examples
|
||||
- ✅ Full API documentation
|
||||
- ✅ Security best practices implemented
|
||||
|
||||
**No additional implementation needed - feature is complete and functional.**
|
||||
208
pkg/security/PASSKEY_QUICK_REFERENCE.md
Normal file
208
pkg/security/PASSKEY_QUICK_REFERENCE.md
Normal file
@@ -0,0 +1,208 @@
|
||||
# Passkey Authentication Quick Reference
|
||||
|
||||
## Overview
|
||||
Passkey authentication (WebAuthn/FIDO2) is now integrated into the DatabaseAuthenticator. This provides passwordless authentication using biometrics, security keys, or device credentials.
|
||||
|
||||
## Setup
|
||||
|
||||
### Database Schema
|
||||
Run the passkey SQL schema (in database_schema.sql):
|
||||
- Creates `user_passkey_credentials` table
|
||||
- Adds stored procedures for passkey operations
|
||||
|
||||
### Go Code
|
||||
```go
|
||||
// Create passkey provider
|
||||
passkeyProvider := security.NewDatabasePasskeyProvider(db,
|
||||
security.DatabasePasskeyProviderOptions{
|
||||
RPID: "example.com",
|
||||
RPName: "Example App",
|
||||
RPOrigin: "https://example.com",
|
||||
Timeout: 60000,
|
||||
})
|
||||
|
||||
// Create authenticator with passkey support
|
||||
auth := security.NewDatabaseAuthenticatorWithOptions(db,
|
||||
security.DatabaseAuthenticatorOptions{
|
||||
PasskeyProvider: passkeyProvider,
|
||||
})
|
||||
|
||||
// Or add passkey to existing authenticator
|
||||
auth = security.NewDatabaseAuthenticator(db).WithPasskey(passkeyProvider)
|
||||
```
|
||||
|
||||
## Registration Flow
|
||||
|
||||
### Backend - Step 1: Begin Registration
|
||||
```go
|
||||
options, err := auth.BeginPasskeyRegistration(ctx,
|
||||
security.PasskeyBeginRegistrationRequest{
|
||||
UserID: 1,
|
||||
Username: "alice",
|
||||
DisplayName: "Alice Smith",
|
||||
})
|
||||
// Send options to client as JSON
|
||||
```
|
||||
|
||||
### Frontend - Step 2: Create Credential
|
||||
```javascript
|
||||
// Convert options from server
|
||||
options.challenge = base64ToArrayBuffer(options.challenge);
|
||||
options.user.id = base64ToArrayBuffer(options.user.id);
|
||||
|
||||
// Create credential
|
||||
const credential = await navigator.credentials.create({
|
||||
publicKey: options
|
||||
});
|
||||
|
||||
// Send credential back to server
|
||||
```
|
||||
|
||||
### Backend - Step 3: Complete Registration
|
||||
```go
|
||||
credential, err := auth.CompletePasskeyRegistration(ctx,
|
||||
security.PasskeyRegisterRequest{
|
||||
UserID: 1,
|
||||
Response: clientResponse,
|
||||
ExpectedChallenge: storedChallenge,
|
||||
CredentialName: "My iPhone",
|
||||
})
|
||||
```
|
||||
|
||||
## Authentication Flow
|
||||
|
||||
### Backend - Step 1: Begin Authentication
|
||||
```go
|
||||
options, err := auth.BeginPasskeyAuthentication(ctx,
|
||||
security.PasskeyBeginAuthenticationRequest{
|
||||
Username: "alice", // Optional for resident key
|
||||
})
|
||||
// Send options to client as JSON
|
||||
```
|
||||
|
||||
### Frontend - Step 2: Get Credential
|
||||
```javascript
|
||||
// Convert options from server
|
||||
options.challenge = base64ToArrayBuffer(options.challenge);
|
||||
|
||||
// Get credential
|
||||
const credential = await navigator.credentials.get({
|
||||
publicKey: options
|
||||
});
|
||||
|
||||
// Send assertion back to server
|
||||
```
|
||||
|
||||
### Backend - Step 3: Complete Authentication
|
||||
```go
|
||||
loginResponse, err := auth.LoginWithPasskey(ctx,
|
||||
security.PasskeyLoginRequest{
|
||||
Response: clientAssertion,
|
||||
ExpectedChallenge: storedChallenge,
|
||||
Claims: map[string]any{
|
||||
"ip_address": "192.168.1.1",
|
||||
"user_agent": "Mozilla/5.0...",
|
||||
},
|
||||
})
|
||||
// Returns session token and user info
|
||||
```
|
||||
|
||||
## Credential Management
|
||||
|
||||
### List Credentials
|
||||
```go
|
||||
credentials, err := auth.GetPasskeyCredentials(ctx, userID)
|
||||
```
|
||||
|
||||
### Update Credential Name
|
||||
```go
|
||||
err := auth.UpdatePasskeyCredentialName(ctx, userID, credentialID, "New Name")
|
||||
```
|
||||
|
||||
### Delete Credential
|
||||
```go
|
||||
err := auth.DeletePasskeyCredential(ctx, userID, credentialID)
|
||||
```
|
||||
|
||||
## HTTP Endpoints Example
|
||||
|
||||
### POST /api/passkey/register/begin
|
||||
Request: `{user_id, username, display_name}`
|
||||
Response: PasskeyRegistrationOptions
|
||||
|
||||
### POST /api/passkey/register/complete
|
||||
Request: `{user_id, response, credential_name}`
|
||||
Response: PasskeyCredential
|
||||
|
||||
### POST /api/passkey/login/begin
|
||||
Request: `{username}` (optional)
|
||||
Response: PasskeyAuthenticationOptions
|
||||
|
||||
### POST /api/passkey/login/complete
|
||||
Request: `{response}`
|
||||
Response: LoginResponse with session token
|
||||
|
||||
### GET /api/passkey/credentials
|
||||
Response: Array of PasskeyCredential
|
||||
|
||||
### DELETE /api/passkey/credentials/{id}
|
||||
Request: `{credential_id}`
|
||||
Response: 204 No Content
|
||||
|
||||
## Database Stored Procedures
|
||||
|
||||
- `resolvespec_passkey_store_credential` - Store new credential
|
||||
- `resolvespec_passkey_get_credential` - Get credential by ID
|
||||
- `resolvespec_passkey_get_user_credentials` - Get all user credentials
|
||||
- `resolvespec_passkey_update_counter` - Update sign counter (clone detection)
|
||||
- `resolvespec_passkey_delete_credential` - Delete credential
|
||||
- `resolvespec_passkey_update_name` - Update credential name
|
||||
- `resolvespec_passkey_get_credentials_by_username` - Get credentials for login
|
||||
|
||||
## Security Features
|
||||
|
||||
- **Clone Detection**: Sign counter validation detects credential cloning
|
||||
- **Attestation Support**: Stores attestation type (none, indirect, direct)
|
||||
- **Transport Options**: Tracks authenticator transports (usb, nfc, ble, internal)
|
||||
- **Backup State**: Tracks if credential is backed up/synced
|
||||
- **User Verification**: Supports preferred/required user verification
|
||||
|
||||
## Important Notes
|
||||
|
||||
1. **WebAuthn Library**: Current implementation is simplified. For production, use a proper WebAuthn library like `github.com/go-webauthn/webauthn` for full verification.
|
||||
|
||||
2. **Challenge Storage**: Store challenges securely in session/cache. Never expose challenges to client beyond initial request.
|
||||
|
||||
3. **HTTPS Required**: Passkeys only work over HTTPS (except localhost).
|
||||
|
||||
4. **Browser Support**: Check browser compatibility for WebAuthn API.
|
||||
|
||||
5. **Relying Party ID**: Must match your domain exactly.
|
||||
|
||||
## Client-Side Helper Functions
|
||||
|
||||
```javascript
|
||||
function base64ToArrayBuffer(base64) {
|
||||
const binary = atob(base64);
|
||||
const bytes = new Uint8Array(binary.length);
|
||||
for (let i = 0; i < binary.length; i++) {
|
||||
bytes[i] = binary.charCodeAt(i);
|
||||
}
|
||||
return bytes.buffer;
|
||||
}
|
||||
|
||||
function arrayBufferToBase64(buffer) {
|
||||
const bytes = new Uint8Array(buffer);
|
||||
let binary = '';
|
||||
for (let i = 0; i < bytes.length; i++) {
|
||||
binary += String.fromCharCode(bytes[i]);
|
||||
}
|
||||
return btoa(binary);
|
||||
}
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
Run tests: `go test -v ./pkg/security -run Passkey`
|
||||
|
||||
All passkey functionality includes comprehensive tests using sqlmock.
|
||||
@@ -7,15 +7,16 @@
|
||||
auth := security.NewDatabaseAuthenticator(db) // Session-based (recommended)
|
||||
// OR: auth := security.NewJWTAuthenticator("secret-key", db)
|
||||
// OR: auth := security.NewHeaderAuthenticator()
|
||||
// OR: auth := security.NewGoogleAuthenticator(clientID, secret, redirectURL, db) // OAuth2
|
||||
|
||||
colSec := security.NewDatabaseColumnSecurityProvider(db)
|
||||
rowSec := security.NewDatabaseRowSecurityProvider(db)
|
||||
|
||||
// Step 2: Combine providers
|
||||
provider := security.NewCompositeSecurityProvider(auth, colSec, rowSec)
|
||||
provider, _ := security.NewCompositeSecurityProvider(auth, colSec, rowSec)
|
||||
|
||||
// Step 3: Setup and apply middleware
|
||||
securityList := security.SetupSecurityProvider(handler, provider)
|
||||
securityList, _ := security.SetupSecurityProvider(handler, provider)
|
||||
router.Use(security.NewAuthMiddleware(securityList))
|
||||
router.Use(security.SetSecurityMiddleware(securityList))
|
||||
```
|
||||
@@ -30,6 +31,7 @@ router.Use(security.SetSecurityMiddleware(securityList))
|
||||
```go
|
||||
// DatabaseAuthenticator uses these stored procedures:
|
||||
resolvespec_login(jsonb) // Login with credentials
|
||||
resolvespec_register(jsonb) // Register new user
|
||||
resolvespec_logout(jsonb) // Invalidate session
|
||||
resolvespec_session(text, text) // Validate session token
|
||||
resolvespec_session_update(text, jsonb) // Update activity timestamp
|
||||
@@ -256,11 +258,8 @@ func (a *JWTAuthenticator) Login(ctx context.Context, req security.LoginRequest)
|
||||
}
|
||||
|
||||
func (a *JWTAuthenticator) Logout(ctx context.Context, req security.LogoutRequest) error {
|
||||
// Add to blacklist
|
||||
return a.db.WithContext(ctx).Table("token_blacklist").Create(map[string]any{
|
||||
"token": req.Token,
|
||||
"user_id": req.UserID,
|
||||
}).Error
|
||||
// Invalidate session via stored procedure
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *JWTAuthenticator) Authenticate(r *http.Request) (*security.UserContext, error) {
|
||||
@@ -403,11 +402,16 @@ assert.Equal(t, "user_id = {UserID}", row.Template)
|
||||
```
|
||||
HTTP Request
|
||||
↓
|
||||
NewAuthMiddleware → calls provider.Authenticate()
|
||||
↓ (adds UserContext to context)
|
||||
NewOptionalAuthMiddleware → calls provider.Authenticate()
|
||||
↓ (adds UserContext or guest context; never 401)
|
||||
SetSecurityMiddleware → adds SecurityList to context
|
||||
↓
|
||||
Handler.Handle()
|
||||
Handler.Handle() → resolves model
|
||||
↓
|
||||
BeforeHandle Hook → CheckModelAuthAllowed(secCtx, operation)
|
||||
├─ SecurityDisabled → allow
|
||||
├─ CanPublicRead/Create/Update/Delete → allow unauthenticated
|
||||
└─ UserID == 0 → abort 401
|
||||
↓
|
||||
BeforeRead Hook → calls provider.GetColumnSecurity() + GetRowSecurity()
|
||||
↓
|
||||
@@ -502,10 +506,31 @@ func (p *MyProvider) GetColumnSecurity(ctx context.Context, userID int, schema,
|
||||
|
||||
---
|
||||
|
||||
## Login/Logout Endpoints
|
||||
## Login/Logout/Register Endpoints
|
||||
|
||||
```go
|
||||
func SetupAuthRoutes(router *mux.Router, securityList *security.SecurityList) {
|
||||
// Register
|
||||
router.HandleFunc("/auth/register", func(w http.ResponseWriter, r *http.Request) {
|
||||
var req security.RegisterRequest
|
||||
json.NewDecoder(r.Body).Decode(&req)
|
||||
|
||||
// Check if provider supports registration
|
||||
registrable, ok := securityList.Provider().(security.Registrable)
|
||||
if !ok {
|
||||
http.Error(w, "Registration not supported", http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := registrable.Register(r.Context(), req)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(resp)
|
||||
}).Methods("POST")
|
||||
|
||||
// Login
|
||||
router.HandleFunc("/auth/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
var req security.LoginRequest
|
||||
@@ -670,15 +695,30 @@ http.Handle("/api/protected", authHandler)
|
||||
optionalHandler := security.NewOptionalAuthHandler(securityList, myHandler)
|
||||
http.Handle("/home", optionalHandler)
|
||||
|
||||
// Example handler
|
||||
func myHandler(w http.ResponseWriter, r *http.Request) {
|
||||
userCtx, _ := security.GetUserContext(r.Context())
|
||||
if userCtx.UserID == 0 {
|
||||
// Guest user
|
||||
} else {
|
||||
// Authenticated user
|
||||
}
|
||||
}
|
||||
// NewOptionalAuthMiddleware - For spec routes; auth enforcement deferred to BeforeHandle
|
||||
apiRouter.Use(security.NewOptionalAuthMiddleware(securityList))
|
||||
apiRouter.Use(security.SetSecurityMiddleware(securityList))
|
||||
restheadspec.RegisterSecurityHooks(handler, securityList) // includes BeforeHandle
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Model-Level Access Control
|
||||
|
||||
```go
|
||||
// Register model with rules (pkg/modelregistry)
|
||||
modelregistry.RegisterModelWithRules("public.products", &Product{}, modelregistry.ModelRules{
|
||||
SecurityDisabled: false, // skip all auth when true
|
||||
CanPublicRead: true, // unauthenticated reads allowed
|
||||
CanPublicCreate: false, // requires auth
|
||||
CanPublicUpdate: false, // requires auth
|
||||
CanPublicDelete: false, // requires auth
|
||||
CanUpdate: true, // authenticated can update
|
||||
CanDelete: false, // authenticated cannot delete (enforced in BeforeDelete)
|
||||
})
|
||||
|
||||
// CheckModelAuthAllowed used automatically in BeforeHandle hook
|
||||
// No code needed — call RegisterSecurityHooks and it's applied
|
||||
```
|
||||
|
||||
---
|
||||
@@ -707,6 +747,7 @@ meta, ok := security.GetUserMeta(ctx)
|
||||
| File | Description |
|
||||
|------|-------------|
|
||||
| `INTERFACE_GUIDE.md` | **Start here** - Complete implementation guide |
|
||||
| `OAUTH2.md` | **OAuth2 Guide** - Google, GitHub, Microsoft, Facebook, custom providers |
|
||||
| `examples.go` | Working provider implementations to copy |
|
||||
| `setup_example.go` | 6 complete integration examples |
|
||||
| `README.md` | Architecture overview and migration guide |
|
||||
|
||||
@@ -6,6 +6,7 @@ Type-safe, composable security system for ResolveSpec with support for authentic
|
||||
|
||||
- ✅ **Interface-Based** - Type-safe providers instead of callbacks
|
||||
- ✅ **Login/Logout Support** - Built-in authentication lifecycle
|
||||
- ✅ **Two-Factor Authentication (2FA)** - Optional TOTP support for enhanced security
|
||||
- ✅ **Composable** - Mix and match different providers
|
||||
- ✅ **No Global State** - Each handler has its own security configuration
|
||||
- ✅ **Testable** - Easy to mock and test
|
||||
@@ -212,6 +213,23 @@ auth := security.NewJWTAuthenticator("secret-key", db)
|
||||
// Note: Requires JWT library installation for token signing/verification
|
||||
```
|
||||
|
||||
**TwoFactorAuthenticator** - Wraps any authenticator with TOTP 2FA:
|
||||
```go
|
||||
baseAuth := security.NewDatabaseAuthenticator(db)
|
||||
|
||||
// Use in-memory provider (for testing)
|
||||
tfaProvider := security.NewMemoryTwoFactorProvider(nil)
|
||||
|
||||
// Or use database provider (for production)
|
||||
tfaProvider := security.NewDatabaseTwoFactorProvider(db, nil)
|
||||
// Requires: users table with totp fields, user_totp_backup_codes table
|
||||
// Requires: resolvespec_totp_* stored procedures (see totp_database_schema.sql)
|
||||
|
||||
auth := security.NewTwoFactorAuthenticator(baseAuth, tfaProvider, nil)
|
||||
// Supports: TOTP codes, backup codes, QR code generation
|
||||
// Compatible with Google Authenticator, Microsoft Authenticator, Authy, etc.
|
||||
```
|
||||
|
||||
### Column Security Providers
|
||||
|
||||
**DatabaseColumnSecurityProvider** - Loads rules from database:
|
||||
@@ -334,7 +352,182 @@ func handleRefresh(securityList *security.SecurityList) http.HandlerFunc {
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Two-Factor Authentication (2FA)
|
||||
|
||||
### Overview
|
||||
|
||||
- **Optional per-user** - Enable/disable 2FA individually
|
||||
- **TOTP standard** - Compatible with Google Authenticator, Microsoft Authenticator, Authy, 1Password, etc.
|
||||
- **Configurable** - SHA1/SHA256/SHA512, 6/8 digits, custom time periods
|
||||
- **Backup codes** - One-time recovery codes with secure hashing
|
||||
- **Clock skew** - Handles time differences between client/server
|
||||
|
||||
### Setup
|
||||
|
||||
```go
|
||||
// 1. Wrap existing authenticator with 2FA support
|
||||
baseAuth := security.NewDatabaseAuthenticator(db)
|
||||
tfaProvider := security.NewMemoryTwoFactorProvider(nil) // Use custom DB implementation in production
|
||||
tfaAuth := security.NewTwoFactorAuthenticator(baseAuth, tfaProvider, nil)
|
||||
|
||||
// 2. Use as normal authenticator
|
||||
provider := security.NewCompositeSecurityProvider(tfaAuth, colSec, rowSec)
|
||||
securityList := security.NewSecurityList(provider)
|
||||
```
|
||||
|
||||
### Enable 2FA for User
|
||||
|
||||
```go
|
||||
// 1. Initiate 2FA setup
|
||||
secret, err := tfaAuth.Setup2FA(userID, "MyApp", "user@example.com")
|
||||
// Returns: secret.Secret, secret.QRCodeURL, secret.BackupCodes
|
||||
|
||||
// 2. User scans QR code with authenticator app
|
||||
// Display secret.QRCodeURL as QR code image
|
||||
|
||||
// 3. User enters verification code from app
|
||||
code := "123456" // From authenticator app
|
||||
err = tfaAuth.Enable2FA(userID, secret.Secret, code)
|
||||
// 2FA is now enabled for this user
|
||||
|
||||
// 4. Store backup codes securely and show to user once
|
||||
// Display: secret.BackupCodes (10 codes)
|
||||
```
|
||||
|
||||
### Login Flow with 2FA
|
||||
|
||||
```go
|
||||
// 1. User provides credentials
|
||||
req := security.LoginRequest{
|
||||
Username: "user@example.com",
|
||||
Password: "password",
|
||||
}
|
||||
|
||||
resp, err := tfaAuth.Login(ctx, req)
|
||||
|
||||
// 2. Check if 2FA required
|
||||
if resp.Requires2FA {
|
||||
// Prompt user for 2FA code
|
||||
code := getUserInput() // From authenticator app or backup code
|
||||
|
||||
// 3. Login again with 2FA code
|
||||
req.TwoFactorCode = code
|
||||
resp, err = tfaAuth.Login(ctx, req)
|
||||
|
||||
// 4. Success - token is returned
|
||||
token := resp.Token
|
||||
}
|
||||
```
|
||||
|
||||
### Manage 2FA
|
||||
|
||||
```go
|
||||
// Disable 2FA
|
||||
err := tfaAuth.Disable2FA(userID)
|
||||
|
||||
// Regenerate backup codes
|
||||
newCodes, err := tfaAuth.RegenerateBackupCodes(userID, 10)
|
||||
|
||||
// Check status
|
||||
has2FA, err := tfaProvider.Get2FAStatus(userID)
|
||||
```
|
||||
|
||||
### Custom 2FA Storage
|
||||
|
||||
**Option 1: Use DatabaseTwoFactorProvider (Recommended)**
|
||||
|
||||
```go
|
||||
// Uses PostgreSQL stored procedures for all operations
|
||||
db := setupDatabase()
|
||||
|
||||
// Run migrations from totp_database_schema.sql
|
||||
// - Add totp_secret, totp_enabled, totp_enabled_at to users table
|
||||
// - Create user_totp_backup_codes table
|
||||
// - Create resolvespec_totp_* stored procedures
|
||||
|
||||
tfaProvider := security.NewDatabaseTwoFactorProvider(db, nil)
|
||||
tfaAuth := security.NewTwoFactorAuthenticator(baseAuth, tfaProvider, nil)
|
||||
```
|
||||
|
||||
**Option 2: Implement Custom Provider**
|
||||
|
||||
Implement `TwoFactorAuthProvider` for custom storage:
|
||||
|
||||
```go
|
||||
type DBTwoFactorProvider struct {
|
||||
db *gorm.DB
|
||||
}
|
||||
|
||||
func (p *DBTwoFactorProvider) Enable2FA(userID int, secret string, backupCodes []string) error {
|
||||
// Store secret and hashed backup codes in database
|
||||
return p.db.Exec("UPDATE users SET totp_secret = ?, backup_codes = ? WHERE id = ?",
|
||||
secret, hashCodes(backupCodes), userID).Error
|
||||
}
|
||||
|
||||
func (p *DBTwoFactorProvider) Get2FASecret(userID int) (string, error) {
|
||||
var secret string
|
||||
err := p.db.Raw("SELECT totp_secret FROM users WHERE id = ?", userID).Scan(&secret).Error
|
||||
return secret, err
|
||||
}
|
||||
|
||||
// Implement remaining methods: Generate2FASecret, Validate2FACode, Disable2FA,
|
||||
// Get2FAStatus, GenerateBackupCodes, ValidateBackupCode
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
```go
|
||||
config := &security.TwoFactorConfig{
|
||||
Algorithm: "SHA256", // SHA1, SHA256, SHA512
|
||||
Digits: 8, // 6 or 8
|
||||
Period: 30, // Seconds per code
|
||||
SkewWindow: 2, // Accept codes ±2 periods
|
||||
}
|
||||
|
||||
totp := security.NewTOTPGenerator(config)
|
||||
tfaAuth := security.NewTwoFactorAuthenticator(baseAuth, tfaProvider, config)
|
||||
```
|
||||
|
||||
### API Response Structure
|
||||
|
||||
```go
|
||||
// LoginResponse with 2FA
|
||||
type LoginResponse struct {
|
||||
Token string `json:"token"`
|
||||
Requires2FA bool `json:"requires_2fa"`
|
||||
TwoFactorSetupData *TwoFactorSecret `json:"two_factor_setup,omitempty"`
|
||||
User *UserContext `json:"user"`
|
||||
}
|
||||
|
||||
// TwoFactorSecret for setup
|
||||
type TwoFactorSecret struct {
|
||||
Secret string `json:"secret"` // Base32 encoded
|
||||
QRCodeURL string `json:"qr_code_url"` // otpauth://totp/...
|
||||
BackupCodes []string `json:"backup_codes"` // 10 recovery codes
|
||||
}
|
||||
|
||||
// UserContext includes 2FA status
|
||||
type UserContext struct {
|
||||
UserID int `json:"user_id"`
|
||||
TwoFactorEnabled bool `json:"two_factor_enabled"`
|
||||
// ... other fields
|
||||
}
|
||||
```
|
||||
|
||||
### Security Best Practices
|
||||
|
||||
- **Store secrets encrypted** - Never store TOTP secrets in plain text
|
||||
- **Hash backup codes** - Use SHA-256 before storing
|
||||
- **Rate limit** - Limit 2FA verification attempts
|
||||
- **Require password** - Always verify password before disabling 2FA
|
||||
- **Show backup codes once** - Display only during setup/regeneration
|
||||
- **Log 2FA events** - Track enable/disable/failed attempts
|
||||
- **Mark codes as used** - Backup codes are single-use only
|
||||
|
||||
|
||||
json.NewEncoder(w).Encode(resp)
|
||||
} else {
|
||||
http.Error(w, "Refresh not supported", http.StatusNotImplemented)
|
||||
@@ -558,14 +751,25 @@ resolvespec.RegisterSecurityHooks(resolveHandler, securityList)
|
||||
```
|
||||
HTTP Request
|
||||
↓
|
||||
NewAuthMiddleware (security package)
|
||||
NewOptionalAuthMiddleware (security package) ← recommended for spec routes
|
||||
├─ Calls provider.Authenticate(request)
|
||||
└─ Adds UserContext to context
|
||||
├─ On success: adds authenticated UserContext to context
|
||||
└─ On failure: adds guest UserContext (UserID=0) to context
|
||||
↓
|
||||
SetSecurityMiddleware (security package)
|
||||
└─ Adds SecurityList to context
|
||||
↓
|
||||
Spec Handler (restheadspec/funcspec/resolvespec)
|
||||
Spec Handler (restheadspec/funcspec/resolvespec/websocketspec/mqttspec)
|
||||
└─ Resolves schema + entity + model from request
|
||||
↓
|
||||
BeforeHandle Hook (registered by spec via RegisterSecurityHooks)
|
||||
├─ Adapts spec's HookContext → SecurityContext
|
||||
├─ Calls security.CheckModelAuthAllowed(secCtx, operation)
|
||||
│ ├─ Loads model rules from context or registry
|
||||
│ ├─ SecurityDisabled → allow
|
||||
│ ├─ CanPublicRead/Create/Update/Delete → allow unauthenticated
|
||||
│ └─ UserID == 0 → 401 unauthorized
|
||||
└─ On error: aborts with 401
|
||||
↓
|
||||
BeforeRead Hook (registered by spec)
|
||||
├─ Adapts spec's HookContext → SecurityContext
|
||||
@@ -591,7 +795,8 @@ HTTP Response (secured data)
|
||||
```
|
||||
|
||||
**Key Points:**
|
||||
- Security package is spec-agnostic and provides core logic
|
||||
- `NewOptionalAuthMiddleware` never rejects — it sets guest context on auth failure; `BeforeHandle` enforces auth after model resolution
|
||||
- `BeforeHandle` fires after model resolution, giving access to model rules and user context simultaneously
|
||||
- Each spec registers its own hooks that adapt to SecurityContext
|
||||
- Security rules are loaded once and cached for the request
|
||||
- Row security is applied to the query (database level)
|
||||
@@ -809,15 +1014,49 @@ func (p *MyProvider) GetRowSecurity(ctx context.Context, userID int, schema, tab
|
||||
}
|
||||
```
|
||||
|
||||
## Model-Level Access Control
|
||||
|
||||
Use `ModelRules` (from `pkg/modelregistry`) to control per-entity auth behavior:
|
||||
|
||||
```go
|
||||
modelregistry.RegisterModelWithRules("public.products", &Product{}, modelregistry.ModelRules{
|
||||
SecurityDisabled: false, // true = skip all auth checks
|
||||
CanPublicRead: true, // unauthenticated GET allowed
|
||||
CanPublicCreate: false, // requires auth
|
||||
CanPublicUpdate: false, // requires auth
|
||||
CanPublicDelete: false, // requires auth
|
||||
CanUpdate: true, // authenticated users can update
|
||||
CanDelete: false, // authenticated users cannot delete
|
||||
})
|
||||
```
|
||||
|
||||
`CheckModelAuthAllowed(secCtx, operation)` applies these rules in `BeforeHandle`:
|
||||
1. `SecurityDisabled` → allow all
|
||||
2. `CanPublicRead/Create/Update/Delete` → allow unauthenticated for that operation
|
||||
3. Guest (UserID == 0) → return 401
|
||||
4. Authenticated → allow (operation-specific `CanUpdate`/`CanDelete` checked in `BeforeUpdate`/`BeforeDelete`)
|
||||
|
||||
---
|
||||
|
||||
## Middleware and Handler API
|
||||
|
||||
### NewAuthMiddleware
|
||||
Standard middleware that authenticates all requests:
|
||||
Standard middleware that authenticates all requests and returns 401 on failure:
|
||||
|
||||
```go
|
||||
router.Use(security.NewAuthMiddleware(securityList))
|
||||
```
|
||||
|
||||
### NewOptionalAuthMiddleware
|
||||
Middleware for spec routes — always continues; sets guest context on auth failure:
|
||||
|
||||
```go
|
||||
// Use with RegisterSecurityHooks — auth enforcement is deferred to BeforeHandle
|
||||
apiRouter.Use(security.NewOptionalAuthMiddleware(securityList))
|
||||
apiRouter.Use(security.SetSecurityMiddleware(securityList))
|
||||
restheadspec.RegisterSecurityHooks(handler, securityList) // registers BeforeHandle
|
||||
```
|
||||
|
||||
Routes can skip authentication using the `SkipAuth` helper:
|
||||
|
||||
```go
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -135,12 +135,6 @@ func (a *JWTAuthenticatorExample) Login(ctx context.Context, req LoginRequest) (
|
||||
}
|
||||
|
||||
func (a *JWTAuthenticatorExample) Logout(ctx context.Context, req LogoutRequest) error {
|
||||
// For JWT, logout could involve token blacklisting
|
||||
// Add token to blacklist table
|
||||
// err := a.db.WithContext(ctx).Table("token_blacklist").Create(map[string]interface{}{
|
||||
// "token": req.Token,
|
||||
// "expires_at": time.Now().Add(24 * time.Hour),
|
||||
// }).Error
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"reflect"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/logger"
|
||||
"github.com/bitechdev/ResolveSpec/pkg/modelregistry"
|
||||
)
|
||||
|
||||
// SecurityContext is a generic interface that any spec can implement to integrate with security features
|
||||
@@ -226,6 +227,122 @@ func ApplyColumnSecurity(secCtx SecurityContext, securityList *SecurityList) err
|
||||
return applyColumnSecurity(secCtx, securityList)
|
||||
}
|
||||
|
||||
// checkModelUpdateAllowed returns an error if CanUpdate is false for the model.
|
||||
// Rules are read from context (set by NewModelAuthMiddleware) with a fallback to the model registry.
|
||||
func checkModelUpdateAllowed(secCtx SecurityContext) error {
|
||||
rules, ok := GetModelRulesFromContext(secCtx.GetContext())
|
||||
if !ok {
|
||||
schema := secCtx.GetSchema()
|
||||
entity := secCtx.GetEntity()
|
||||
var err error
|
||||
if schema != "" {
|
||||
rules, err = modelregistry.GetModelRulesByName(fmt.Sprintf("%s.%s", schema, entity))
|
||||
}
|
||||
if err != nil || schema == "" {
|
||||
rules, err = modelregistry.GetModelRulesByName(entity)
|
||||
}
|
||||
if err != nil {
|
||||
return nil // model not registered, allow by default
|
||||
}
|
||||
}
|
||||
if !rules.CanUpdate {
|
||||
return fmt.Errorf("update not allowed for %s", secCtx.GetEntity())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkModelDeleteAllowed returns an error if CanDelete is false for the model.
|
||||
// Rules are read from context (set by NewModelAuthMiddleware) with a fallback to the model registry.
|
||||
func checkModelDeleteAllowed(secCtx SecurityContext) error {
|
||||
rules, ok := GetModelRulesFromContext(secCtx.GetContext())
|
||||
if !ok {
|
||||
schema := secCtx.GetSchema()
|
||||
entity := secCtx.GetEntity()
|
||||
var err error
|
||||
if schema != "" {
|
||||
rules, err = modelregistry.GetModelRulesByName(fmt.Sprintf("%s.%s", schema, entity))
|
||||
}
|
||||
if err != nil || schema == "" {
|
||||
rules, err = modelregistry.GetModelRulesByName(entity)
|
||||
}
|
||||
if err != nil {
|
||||
return nil // model not registered, allow by default
|
||||
}
|
||||
}
|
||||
if !rules.CanDelete {
|
||||
return fmt.Errorf("delete not allowed for %s", secCtx.GetEntity())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckModelAuthAllowed checks whether the requested operation is permitted based on
|
||||
// model rules and the current user's authentication state. It is intended for use in
|
||||
// a BeforeHandle hook, fired after model resolution.
|
||||
//
|
||||
// Logic:
|
||||
// 1. Load model rules from context (set by NewModelAuthMiddleware) or fall back to registry.
|
||||
// 2. SecurityDisabled → allow.
|
||||
// 3. operation == "read" && CanPublicRead → allow.
|
||||
// 4. operation == "create" && CanPublicCreate → allow.
|
||||
// 5. operation == "update" && CanPublicUpdate → allow.
|
||||
// 6. operation == "delete" && CanPublicDelete → allow.
|
||||
// 7. Guest (UserID == 0) → return "authentication required".
|
||||
// 8. Authenticated user → allow (operation-specific checks remain in BeforeUpdate/BeforeDelete).
|
||||
func CheckModelAuthAllowed(secCtx SecurityContext, operation string) error {
|
||||
rules, ok := GetModelRulesFromContext(secCtx.GetContext())
|
||||
if !ok {
|
||||
schema := secCtx.GetSchema()
|
||||
entity := secCtx.GetEntity()
|
||||
var err error
|
||||
if schema != "" {
|
||||
rules, err = modelregistry.GetModelRulesByName(fmt.Sprintf("%s.%s", schema, entity))
|
||||
}
|
||||
if err != nil || schema == "" {
|
||||
rules, err = modelregistry.GetModelRulesByName(entity)
|
||||
}
|
||||
if err != nil {
|
||||
// Model not registered - fall through to auth check
|
||||
userID, _ := secCtx.GetUserID()
|
||||
if userID == 0 {
|
||||
return fmt.Errorf("authentication required")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if rules.SecurityDisabled {
|
||||
return nil
|
||||
}
|
||||
if operation == "read" && rules.CanPublicRead {
|
||||
return nil
|
||||
}
|
||||
if operation == "create" && rules.CanPublicCreate {
|
||||
return nil
|
||||
}
|
||||
if operation == "update" && rules.CanPublicUpdate {
|
||||
return nil
|
||||
}
|
||||
if operation == "delete" && rules.CanPublicDelete {
|
||||
return nil
|
||||
}
|
||||
|
||||
userID, _ := secCtx.GetUserID()
|
||||
if userID == 0 {
|
||||
return fmt.Errorf("authentication required")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckModelUpdateAllowed is the public wrapper for checkModelUpdateAllowed.
|
||||
func CheckModelUpdateAllowed(secCtx SecurityContext) error {
|
||||
return checkModelUpdateAllowed(secCtx)
|
||||
}
|
||||
|
||||
// CheckModelDeleteAllowed is the public wrapper for checkModelDeleteAllowed.
|
||||
func CheckModelDeleteAllowed(secCtx SecurityContext) error {
|
||||
return checkModelDeleteAllowed(secCtx)
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
|
||||
func contains(s, substr string) bool {
|
||||
|
||||
@@ -17,22 +17,37 @@ type UserContext struct {
|
||||
Email string `json:"email"`
|
||||
Claims map[string]any `json:"claims"`
|
||||
Meta map[string]any `json:"meta"` // Additional metadata that can hold any JSON-serializable values
|
||||
TwoFactorEnabled bool `json:"two_factor_enabled"` // Indicates if 2FA is enabled for this user
|
||||
}
|
||||
|
||||
// LoginRequest contains credentials for login
|
||||
type LoginRequest struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
TwoFactorCode string `json:"two_factor_code,omitempty"` // TOTP or backup code
|
||||
Claims map[string]any `json:"claims"` // Additional login data
|
||||
Meta map[string]any `json:"meta"` // Additional metadata to be set on user context
|
||||
}
|
||||
|
||||
// RegisterRequest contains information for new user registration
|
||||
type RegisterRequest struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Email string `json:"email"`
|
||||
UserLevel int `json:"user_level"`
|
||||
Roles []string `json:"roles"`
|
||||
Claims map[string]any `json:"claims"` // Additional registration data
|
||||
Meta map[string]any `json:"meta"` // Additional metadata
|
||||
}
|
||||
|
||||
// LoginResponse contains the result of a login attempt
|
||||
type LoginResponse struct {
|
||||
Token string `json:"token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
User *UserContext `json:"user"`
|
||||
ExpiresIn int64 `json:"expires_in"` // Token expiration in seconds
|
||||
Requires2FA bool `json:"requires_2fa"` // True if 2FA code is required
|
||||
TwoFactorSetupData *TwoFactorSecret `json:"two_factor_setup,omitempty"` // Present when setting up 2FA
|
||||
Meta map[string]any `json:"meta"` // Additional metadata to be set on user context
|
||||
}
|
||||
|
||||
@@ -55,6 +70,12 @@ type Authenticator interface {
|
||||
Authenticate(r *http.Request) (*UserContext, error)
|
||||
}
|
||||
|
||||
// Registrable allows providers to support user registration
|
||||
type Registrable interface {
|
||||
// Register creates a new user account
|
||||
Register(ctx context.Context, req RegisterRequest) (*LoginResponse, error)
|
||||
}
|
||||
|
||||
// ColumnSecurityProvider handles column-level security (masking/hiding)
|
||||
type ColumnSecurityProvider interface {
|
||||
// GetColumnSecurity loads column security rules for a user and entity
|
||||
|
||||
81
pkg/security/keystore.go
Normal file
81
pkg/security/keystore.go
Normal file
@@ -0,0 +1,81 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"time"
|
||||
)
|
||||
|
||||
// hashSHA256Hex returns the lowercase hex SHA-256 digest of the given string.
|
||||
// Used by all keystore implementations to hash raw keys before storage or lookup.
|
||||
func hashSHA256Hex(raw string) string {
|
||||
sum := sha256.Sum256([]byte(raw))
|
||||
return hex.EncodeToString(sum[:])
|
||||
}
|
||||
|
||||
// KeyType identifies the category of an auth key.
|
||||
type KeyType string
|
||||
|
||||
const (
|
||||
// KeyTypeJWTSecret is a per-user JWT signing secret for token generation.
|
||||
KeyTypeJWTSecret KeyType = "jwt_secret"
|
||||
// KeyTypeHeaderAPI is a static API key sent via a request header.
|
||||
KeyTypeHeaderAPI KeyType = "header_api"
|
||||
// KeyTypeOAuth2 holds OAuth2 client credentials (client_id / client_secret).
|
||||
KeyTypeOAuth2 KeyType = "oauth2"
|
||||
// KeyTypeGenericAPI is a generic application API key.
|
||||
KeyTypeGenericAPI KeyType = "api"
|
||||
)
|
||||
|
||||
// UserKey represents a single named auth key belonging to a user.
|
||||
// KeyHash stores the SHA-256 hex digest of the raw key; the raw key is never persisted.
|
||||
type UserKey struct {
|
||||
ID int64 `json:"id"`
|
||||
UserID int `json:"user_id"`
|
||||
KeyType KeyType `json:"key_type"`
|
||||
KeyHash string `json:"key_hash"` // SHA-256 hex; never the raw key
|
||||
Name string `json:"name"`
|
||||
Scopes []string `json:"scopes,omitempty"`
|
||||
Meta map[string]any `json:"meta,omitempty"`
|
||||
ExpiresAt *time.Time `json:"expires_at,omitempty"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
LastUsedAt *time.Time `json:"last_used_at,omitempty"`
|
||||
IsActive bool `json:"is_active"`
|
||||
}
|
||||
|
||||
// CreateKeyRequest specifies the parameters for a new key.
|
||||
type CreateKeyRequest struct {
|
||||
UserID int
|
||||
KeyType KeyType
|
||||
Name string
|
||||
Scopes []string
|
||||
Meta map[string]any
|
||||
ExpiresAt *time.Time
|
||||
}
|
||||
|
||||
// CreateKeyResponse is returned exactly once when a key is created.
|
||||
// The caller is responsible for persisting RawKey; it is not stored anywhere.
|
||||
type CreateKeyResponse struct {
|
||||
Key UserKey
|
||||
RawKey string // crypto/rand 32 bytes, base64url-encoded
|
||||
}
|
||||
|
||||
// KeyStore manages per-user auth keys with pluggable storage backends.
|
||||
// Implementations: ConfigKeyStore (static list) and DatabaseKeyStore (stored procedures).
|
||||
type KeyStore interface {
|
||||
// CreateKey generates a new key, stores its hash, and returns the raw key once.
|
||||
CreateKey(ctx context.Context, req CreateKeyRequest) (*CreateKeyResponse, error)
|
||||
|
||||
// GetUserKeys returns all active, non-expired keys for a user.
|
||||
// Pass an empty KeyType to return all types.
|
||||
GetUserKeys(ctx context.Context, userID int, keyType KeyType) ([]UserKey, error)
|
||||
|
||||
// DeleteKey soft-deletes a key by ID after verifying ownership.
|
||||
DeleteKey(ctx context.Context, userID int, keyID int64) error
|
||||
|
||||
// ValidateKey checks a raw key, returns the matching UserKey on success.
|
||||
// The implementation hashes the raw key before any lookup.
|
||||
// Pass an empty KeyType to accept any type.
|
||||
ValidateKey(ctx context.Context, rawKey string, keyType KeyType) (*UserKey, error)
|
||||
}
|
||||
97
pkg/security/keystore_authenticator.go
Normal file
97
pkg/security/keystore_authenticator.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// KeyStoreAuthenticator implements the Authenticator interface using a KeyStore.
|
||||
// It is suitable for long-lived application credentials (API keys, JWT secrets, etc.)
|
||||
// rather than interactive sessions. Login and Logout are not supported — key lifecycle
|
||||
// is managed directly through the KeyStore.
|
||||
//
|
||||
// Key extraction order:
|
||||
// 1. Authorization: Bearer <key>
|
||||
// 2. Authorization: ApiKey <key>
|
||||
// 3. X-API-Key header
|
||||
type KeyStoreAuthenticator struct {
|
||||
keyStore KeyStore
|
||||
keyType KeyType // empty = accept any type
|
||||
}
|
||||
|
||||
// NewKeyStoreAuthenticator creates a KeyStoreAuthenticator.
|
||||
// Pass an empty keyType to accept keys of any type.
|
||||
func NewKeyStoreAuthenticator(ks KeyStore, keyType KeyType) *KeyStoreAuthenticator {
|
||||
return &KeyStoreAuthenticator{keyStore: ks, keyType: keyType}
|
||||
}
|
||||
|
||||
// Login is not supported for keystore authentication.
|
||||
func (a *KeyStoreAuthenticator) Login(_ context.Context, _ LoginRequest) (*LoginResponse, error) {
|
||||
return nil, fmt.Errorf("keystore authenticator does not support login")
|
||||
}
|
||||
|
||||
// Logout is not supported for keystore authentication.
|
||||
func (a *KeyStoreAuthenticator) Logout(_ context.Context, _ LogoutRequest) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Authenticate extracts an API key from the request and validates it against the KeyStore.
|
||||
// Returns a UserContext built from the matching UserKey on success.
|
||||
func (a *KeyStoreAuthenticator) Authenticate(r *http.Request) (*UserContext, error) {
|
||||
rawKey := extractAPIKey(r)
|
||||
if rawKey == "" {
|
||||
return nil, fmt.Errorf("API key required (Authorization: Bearer/ApiKey <key> or X-API-Key header)")
|
||||
}
|
||||
|
||||
userKey, err := a.keyStore.ValidateKey(r.Context(), rawKey, a.keyType)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid API key: %w", err)
|
||||
}
|
||||
|
||||
return userKeyToUserContext(userKey), nil
|
||||
}
|
||||
|
||||
// extractAPIKey extracts a raw key from the request using the following precedence:
|
||||
// 1. Authorization: Bearer <key>
|
||||
// 2. Authorization: ApiKey <key>
|
||||
// 3. X-API-Key header
|
||||
func extractAPIKey(r *http.Request) string {
|
||||
if auth := r.Header.Get("Authorization"); auth != "" {
|
||||
if after, ok := strings.CutPrefix(auth, "Bearer "); ok {
|
||||
return strings.TrimSpace(after)
|
||||
}
|
||||
if after, ok := strings.CutPrefix(auth, "ApiKey "); ok {
|
||||
return strings.TrimSpace(after)
|
||||
}
|
||||
}
|
||||
return strings.TrimSpace(r.Header.Get("X-API-Key"))
|
||||
}
|
||||
|
||||
// userKeyToUserContext converts a UserKey into a UserContext.
|
||||
// Scopes are mapped to Roles. Key type and name are stored in Claims.
|
||||
func userKeyToUserContext(k *UserKey) *UserContext {
|
||||
claims := map[string]any{
|
||||
"key_type": string(k.KeyType),
|
||||
"key_name": k.Name,
|
||||
}
|
||||
|
||||
meta := k.Meta
|
||||
if meta == nil {
|
||||
meta = map[string]any{}
|
||||
}
|
||||
|
||||
roles := k.Scopes
|
||||
if roles == nil {
|
||||
roles = []string{}
|
||||
}
|
||||
|
||||
return &UserContext{
|
||||
UserID: k.UserID,
|
||||
SessionID: fmt.Sprintf("key:%d", k.ID),
|
||||
Roles: roles,
|
||||
Claims: claims,
|
||||
Meta: meta,
|
||||
}
|
||||
}
|
||||
149
pkg/security/keystore_config.go
Normal file
149
pkg/security/keystore_config.go
Normal file
@@ -0,0 +1,149 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/subtle"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ConfigKeyStore is an in-memory keystore backed by a static slice of UserKey values.
|
||||
// It is designed for config-file driven setups (e.g. service accounts defined in YAML)
|
||||
// with a small, bounded number of keys. For large or dynamic key sets use DatabaseKeyStore.
|
||||
//
|
||||
// Pre-existing entries must have KeyHash set to the SHA-256 hex of the intended raw key.
|
||||
// Keys created at runtime via CreateKey are held in memory only and lost on restart.
|
||||
type ConfigKeyStore struct {
|
||||
mu sync.RWMutex
|
||||
keys []UserKey
|
||||
next int64 // monotonic ID counter for runtime-created keys (atomic)
|
||||
}
|
||||
|
||||
// NewConfigKeyStore creates a ConfigKeyStore seeded with the provided keys.
|
||||
// Pass nil or an empty slice to start with no pre-loaded keys.
|
||||
// Zero-value entries (CreatedAt is zero) are treated as active and assigned the current time.
|
||||
func NewConfigKeyStore(keys []UserKey) *ConfigKeyStore {
|
||||
var maxID int64
|
||||
copied := make([]UserKey, len(keys))
|
||||
copy(copied, keys)
|
||||
for i := range copied {
|
||||
if copied[i].CreatedAt.IsZero() {
|
||||
copied[i].IsActive = true
|
||||
copied[i].CreatedAt = time.Now()
|
||||
}
|
||||
if copied[i].ID > maxID {
|
||||
maxID = copied[i].ID
|
||||
}
|
||||
}
|
||||
return &ConfigKeyStore{keys: copied, next: maxID}
|
||||
}
|
||||
|
||||
// CreateKey generates a new raw key, stores its SHA-256 hash, and returns the raw key once.
|
||||
func (s *ConfigKeyStore) CreateKey(_ context.Context, req CreateKeyRequest) (*CreateKeyResponse, error) {
|
||||
rawBytes := make([]byte, 32)
|
||||
if _, err := rand.Read(rawBytes); err != nil {
|
||||
return nil, fmt.Errorf("failed to generate key material: %w", err)
|
||||
}
|
||||
rawKey := base64.RawURLEncoding.EncodeToString(rawBytes)
|
||||
hash := hashSHA256Hex(rawKey)
|
||||
|
||||
id := atomic.AddInt64(&s.next, 1)
|
||||
key := UserKey{
|
||||
ID: id,
|
||||
UserID: req.UserID,
|
||||
KeyType: req.KeyType,
|
||||
KeyHash: hash,
|
||||
Name: req.Name,
|
||||
Scopes: req.Scopes,
|
||||
Meta: req.Meta,
|
||||
ExpiresAt: req.ExpiresAt,
|
||||
CreatedAt: time.Now(),
|
||||
IsActive: true,
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.keys = append(s.keys, key)
|
||||
s.mu.Unlock()
|
||||
|
||||
return &CreateKeyResponse{Key: key, RawKey: rawKey}, nil
|
||||
}
|
||||
|
||||
// GetUserKeys returns all active, non-expired keys for the given user.
|
||||
// Pass an empty KeyType to return all types.
|
||||
func (s *ConfigKeyStore) GetUserKeys(_ context.Context, userID int, keyType KeyType) ([]UserKey, error) {
|
||||
now := time.Now()
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
var result []UserKey
|
||||
for i := range s.keys {
|
||||
k := &s.keys[i]
|
||||
if k.UserID != userID || !k.IsActive {
|
||||
continue
|
||||
}
|
||||
if k.ExpiresAt != nil && k.ExpiresAt.Before(now) {
|
||||
continue
|
||||
}
|
||||
if keyType != "" && k.KeyType != keyType {
|
||||
continue
|
||||
}
|
||||
result = append(result, *k)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// DeleteKey soft-deletes a key by setting IsActive to false after ownership verification.
|
||||
func (s *ConfigKeyStore) DeleteKey(_ context.Context, userID int, keyID int64) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
for i := range s.keys {
|
||||
if s.keys[i].ID == keyID {
|
||||
if s.keys[i].UserID != userID {
|
||||
return fmt.Errorf("key not found or permission denied")
|
||||
}
|
||||
s.keys[i].IsActive = false
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("key not found")
|
||||
}
|
||||
|
||||
// ValidateKey hashes the raw key and finds a matching, active, non-expired entry.
|
||||
// Uses constant-time comparison to prevent timing side-channels.
|
||||
// Pass an empty KeyType to accept any type.
|
||||
func (s *ConfigKeyStore) ValidateKey(_ context.Context, rawKey string, keyType KeyType) (*UserKey, error) {
|
||||
hash := hashSHA256Hex(rawKey)
|
||||
hashBytes, _ := hex.DecodeString(hash)
|
||||
now := time.Now()
|
||||
|
||||
// Write lock: ValidateKey updates LastUsedAt on the matched entry.
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
for i := range s.keys {
|
||||
k := &s.keys[i]
|
||||
if !k.IsActive {
|
||||
continue
|
||||
}
|
||||
if k.ExpiresAt != nil && k.ExpiresAt.Before(now) {
|
||||
continue
|
||||
}
|
||||
if keyType != "" && k.KeyType != keyType {
|
||||
continue
|
||||
}
|
||||
stored, _ := hex.DecodeString(k.KeyHash)
|
||||
if subtle.ConstantTimeCompare(hashBytes, stored) != 1 {
|
||||
continue
|
||||
}
|
||||
k.LastUsedAt = &now
|
||||
result := *k
|
||||
return &result, nil
|
||||
}
|
||||
return nil, fmt.Errorf("invalid or expired key")
|
||||
}
|
||||
256
pkg/security/keystore_database.go
Normal file
256
pkg/security/keystore_database.go
Normal file
@@ -0,0 +1,256 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"database/sql"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/cache"
|
||||
)
|
||||
|
||||
// DatabaseKeyStoreOptions configures DatabaseKeyStore.
|
||||
type DatabaseKeyStoreOptions struct {
|
||||
// Cache is an optional cache instance. If nil, uses the default cache.
|
||||
Cache *cache.Cache
|
||||
// CacheTTL is the duration to cache ValidateKey results.
|
||||
// Default: 2 minutes.
|
||||
CacheTTL time.Duration
|
||||
// SQLNames provides custom procedure names. If nil, uses DefaultKeyStoreSQLNames().
|
||||
SQLNames *KeyStoreSQLNames
|
||||
// DBFactory is called to obtain a fresh *sql.DB when the existing connection is closed.
|
||||
// If nil, reconnection is disabled.
|
||||
DBFactory func() (*sql.DB, error)
|
||||
}
|
||||
|
||||
// DatabaseKeyStore is a KeyStore backed by PostgreSQL stored procedures.
|
||||
// All DB operations go through configurable procedure names; the raw key is
|
||||
// never passed to the database.
|
||||
//
|
||||
// See keystore_schema.sql for the required table and procedure definitions.
|
||||
//
|
||||
// Note: DeleteKey invalidates the cache entry for the deleted key. Due to the
|
||||
// cache TTL, a deleted key may continue to authenticate for up to CacheTTL
|
||||
// (default 2 minutes) if the cache entry cannot be invalidated.
|
||||
type DatabaseKeyStore struct {
|
||||
db *sql.DB
|
||||
dbMu sync.RWMutex
|
||||
dbFactory func() (*sql.DB, error)
|
||||
sqlNames *KeyStoreSQLNames
|
||||
cache *cache.Cache
|
||||
cacheTTL time.Duration
|
||||
}
|
||||
|
||||
// NewDatabaseKeyStore creates a DatabaseKeyStore with optional configuration.
|
||||
func NewDatabaseKeyStore(db *sql.DB, opts ...DatabaseKeyStoreOptions) *DatabaseKeyStore {
|
||||
o := DatabaseKeyStoreOptions{}
|
||||
if len(opts) > 0 {
|
||||
o = opts[0]
|
||||
}
|
||||
if o.CacheTTL == 0 {
|
||||
o.CacheTTL = 2 * time.Minute
|
||||
}
|
||||
c := o.Cache
|
||||
if c == nil {
|
||||
c = cache.GetDefaultCache()
|
||||
}
|
||||
names := MergeKeyStoreSQLNames(DefaultKeyStoreSQLNames(), o.SQLNames)
|
||||
return &DatabaseKeyStore{
|
||||
db: db,
|
||||
dbFactory: o.DBFactory,
|
||||
sqlNames: names,
|
||||
cache: c,
|
||||
cacheTTL: o.CacheTTL,
|
||||
}
|
||||
}
|
||||
|
||||
func (ks *DatabaseKeyStore) getDB() *sql.DB {
|
||||
ks.dbMu.RLock()
|
||||
defer ks.dbMu.RUnlock()
|
||||
return ks.db
|
||||
}
|
||||
|
||||
func (ks *DatabaseKeyStore) reconnectDB() error {
|
||||
if ks.dbFactory == nil {
|
||||
return fmt.Errorf("no db factory configured for reconnect")
|
||||
}
|
||||
newDB, err := ks.dbFactory()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ks.dbMu.Lock()
|
||||
ks.db = newDB
|
||||
ks.dbMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateKey generates a raw key, stores its SHA-256 hash via the create procedure,
|
||||
// and returns the raw key once.
|
||||
func (ks *DatabaseKeyStore) CreateKey(ctx context.Context, req CreateKeyRequest) (*CreateKeyResponse, error) {
|
||||
rawBytes := make([]byte, 32)
|
||||
if _, err := rand.Read(rawBytes); err != nil {
|
||||
return nil, fmt.Errorf("failed to generate key material: %w", err)
|
||||
}
|
||||
rawKey := base64.RawURLEncoding.EncodeToString(rawBytes)
|
||||
hash := hashSHA256Hex(rawKey)
|
||||
|
||||
type createRequest struct {
|
||||
UserID int `json:"user_id"`
|
||||
KeyType KeyType `json:"key_type"`
|
||||
KeyHash string `json:"key_hash"`
|
||||
Name string `json:"name"`
|
||||
Scopes []string `json:"scopes,omitempty"`
|
||||
Meta map[string]any `json:"meta,omitempty"`
|
||||
ExpiresAt *time.Time `json:"expires_at,omitempty"`
|
||||
}
|
||||
|
||||
reqJSON, err := json.Marshal(createRequest{
|
||||
UserID: req.UserID,
|
||||
KeyType: req.KeyType,
|
||||
KeyHash: hash,
|
||||
Name: req.Name,
|
||||
Scopes: req.Scopes,
|
||||
Meta: req.Meta,
|
||||
ExpiresAt: req.ExpiresAt,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal create key request: %w", err)
|
||||
}
|
||||
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var keyJSON sql.NullString
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_key::text FROM %s($1::jsonb)`, ks.sqlNames.CreateKey)
|
||||
if err = ks.getDB().QueryRowContext(ctx, query, string(reqJSON)).Scan(&success, &errorMsg, &keyJSON); err != nil {
|
||||
return nil, fmt.Errorf("create key procedure failed: %w", err)
|
||||
}
|
||||
if !success {
|
||||
return nil, errors.New(nullStringOr(errorMsg, "create key failed"))
|
||||
}
|
||||
|
||||
var key UserKey
|
||||
if err = json.Unmarshal([]byte(keyJSON.String), &key); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse created key: %w", err)
|
||||
}
|
||||
|
||||
return &CreateKeyResponse{Key: key, RawKey: rawKey}, nil
|
||||
}
|
||||
|
||||
// GetUserKeys returns all active, non-expired keys for the given user.
|
||||
// Pass an empty KeyType to return all types.
|
||||
func (ks *DatabaseKeyStore) GetUserKeys(ctx context.Context, userID int, keyType KeyType) ([]UserKey, error) {
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var keysJSON sql.NullString
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_keys::text FROM %s($1, $2)`, ks.sqlNames.GetUserKeys)
|
||||
if err := ks.getDB().QueryRowContext(ctx, query, userID, string(keyType)).Scan(&success, &errorMsg, &keysJSON); err != nil {
|
||||
return nil, fmt.Errorf("get user keys procedure failed: %w", err)
|
||||
}
|
||||
if !success {
|
||||
return nil, errors.New(nullStringOr(errorMsg, "get user keys failed"))
|
||||
}
|
||||
|
||||
var keys []UserKey
|
||||
if keysJSON.Valid && keysJSON.String != "" && keysJSON.String != "[]" {
|
||||
if err := json.Unmarshal([]byte(keysJSON.String), &keys); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse user keys: %w", err)
|
||||
}
|
||||
}
|
||||
if keys == nil {
|
||||
keys = []UserKey{}
|
||||
}
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// DeleteKey soft-deletes a key after verifying ownership and invalidates its cache entry.
|
||||
// The delete procedure returns the key_hash so no separate lookup is needed.
|
||||
// Note: cache invalidation is best-effort; a cached entry may persist for up to CacheTTL.
|
||||
func (ks *DatabaseKeyStore) DeleteKey(ctx context.Context, userID int, keyID int64) error {
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var keyHash sql.NullString
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_key_hash FROM %s($1, $2)`, ks.sqlNames.DeleteKey)
|
||||
if err := ks.getDB().QueryRowContext(ctx, query, userID, keyID).Scan(&success, &errorMsg, &keyHash); err != nil {
|
||||
return fmt.Errorf("delete key procedure failed: %w", err)
|
||||
}
|
||||
if !success {
|
||||
return errors.New(nullStringOr(errorMsg, "delete key failed"))
|
||||
}
|
||||
|
||||
if keyHash.Valid && keyHash.String != "" && ks.cache != nil {
|
||||
_ = ks.cache.Delete(ctx, keystoreCacheKey(keyHash.String))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateKey hashes the raw key and calls the validate procedure.
|
||||
// Results are cached for CacheTTL to reduce DB load on hot paths.
|
||||
func (ks *DatabaseKeyStore) ValidateKey(ctx context.Context, rawKey string, keyType KeyType) (*UserKey, error) {
|
||||
hash := hashSHA256Hex(rawKey)
|
||||
cacheKey := keystoreCacheKey(hash)
|
||||
|
||||
if ks.cache != nil {
|
||||
var cached UserKey
|
||||
if err := ks.cache.Get(ctx, cacheKey, &cached); err == nil {
|
||||
if cached.IsActive {
|
||||
return &cached, nil
|
||||
}
|
||||
return nil, errors.New("invalid or expired key")
|
||||
}
|
||||
}
|
||||
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var keyJSON sql.NullString
|
||||
|
||||
runQuery := func() error {
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_key::text FROM %s($1, $2)`, ks.sqlNames.ValidateKey)
|
||||
return ks.getDB().QueryRowContext(ctx, query, hash, string(keyType)).Scan(&success, &errorMsg, &keyJSON)
|
||||
}
|
||||
if err := runQuery(); err != nil {
|
||||
if isDBClosed(err) {
|
||||
if reconnErr := ks.reconnectDB(); reconnErr == nil {
|
||||
err = runQuery()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("validate key procedure failed: %w", err)
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("validate key procedure failed: %w", err)
|
||||
}
|
||||
}
|
||||
if !success {
|
||||
return nil, errors.New(nullStringOr(errorMsg, "invalid or expired key"))
|
||||
}
|
||||
|
||||
var key UserKey
|
||||
if err := json.Unmarshal([]byte(keyJSON.String), &key); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse validated key: %w", err)
|
||||
}
|
||||
|
||||
if ks.cache != nil {
|
||||
_ = ks.cache.Set(ctx, cacheKey, key, ks.cacheTTL)
|
||||
}
|
||||
|
||||
return &key, nil
|
||||
}
|
||||
|
||||
func keystoreCacheKey(hash string) string {
|
||||
return "keystore:validate:" + hash
|
||||
}
|
||||
|
||||
// nullStringOr returns s.String if valid, otherwise the fallback.
|
||||
func nullStringOr(s sql.NullString, fallback string) string {
|
||||
if s.Valid && s.String != "" {
|
||||
return s.String
|
||||
}
|
||||
return fallback
|
||||
}
|
||||
187
pkg/security/keystore_schema.sql
Normal file
187
pkg/security/keystore_schema.sql
Normal file
@@ -0,0 +1,187 @@
|
||||
-- Keystore schema for per-user auth keys
|
||||
-- Apply alongside database_schema.sql (requires the users table)
|
||||
|
||||
CREATE TABLE IF NOT EXISTS user_keys (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
key_type VARCHAR(50) NOT NULL,
|
||||
key_hash VARCHAR(64) NOT NULL UNIQUE, -- SHA-256 hex digest (64 chars)
|
||||
name VARCHAR(255) NOT NULL DEFAULT '',
|
||||
scopes TEXT, -- JSON array, e.g. '["read","write"]'
|
||||
meta JSONB,
|
||||
expires_at TIMESTAMP,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
last_used_at TIMESTAMP,
|
||||
is_active BOOLEAN DEFAULT true
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_user_keys_user_id ON user_keys(user_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_user_keys_key_hash ON user_keys(key_hash);
|
||||
CREATE INDEX IF NOT EXISTS idx_user_keys_key_type ON user_keys(key_type);
|
||||
|
||||
-- resolvespec_keystore_get_user_keys
|
||||
-- Returns all active, non-expired keys for a user.
|
||||
-- Pass empty p_key_type to return all key types.
|
||||
CREATE OR REPLACE FUNCTION resolvespec_keystore_get_user_keys(
|
||||
p_user_id INTEGER,
|
||||
p_key_type TEXT DEFAULT ''
|
||||
)
|
||||
RETURNS TABLE(p_success BOOLEAN, p_error TEXT, p_keys JSONB)
|
||||
LANGUAGE plpgsql AS $$
|
||||
DECLARE
|
||||
v_keys JSONB;
|
||||
BEGIN
|
||||
SELECT COALESCE(
|
||||
jsonb_agg(
|
||||
jsonb_build_object(
|
||||
'id', k.id,
|
||||
'user_id', k.user_id,
|
||||
'key_type', k.key_type,
|
||||
'name', k.name,
|
||||
'scopes', CASE WHEN k.scopes IS NOT NULL THEN k.scopes::jsonb ELSE '[]'::jsonb END,
|
||||
'meta', COALESCE(k.meta, '{}'::jsonb),
|
||||
'expires_at', k.expires_at,
|
||||
'created_at', k.created_at,
|
||||
'last_used_at', k.last_used_at,
|
||||
'is_active', k.is_active
|
||||
)
|
||||
),
|
||||
'[]'::jsonb
|
||||
)
|
||||
INTO v_keys
|
||||
FROM user_keys k
|
||||
WHERE k.user_id = p_user_id
|
||||
AND k.is_active = true
|
||||
AND (k.expires_at IS NULL OR k.expires_at > NOW())
|
||||
AND (p_key_type = '' OR k.key_type = p_key_type);
|
||||
|
||||
RETURN QUERY SELECT true, NULL::TEXT, v_keys;
|
||||
EXCEPTION WHEN OTHERS THEN
|
||||
RETURN QUERY SELECT false, SQLERRM, NULL::JSONB;
|
||||
END;
|
||||
$$;
|
||||
|
||||
-- resolvespec_keystore_create_key
|
||||
-- Inserts a new key row. key_hash is provided by the caller (Go hashes the raw key).
|
||||
-- Returns the created key record (without key_hash).
|
||||
CREATE OR REPLACE FUNCTION resolvespec_keystore_create_key(
|
||||
p_request JSONB
|
||||
)
|
||||
RETURNS TABLE(p_success BOOLEAN, p_error TEXT, p_key JSONB)
|
||||
LANGUAGE plpgsql AS $$
|
||||
DECLARE
|
||||
v_id BIGINT;
|
||||
v_created_at TIMESTAMP;
|
||||
v_key JSONB;
|
||||
BEGIN
|
||||
INSERT INTO user_keys (user_id, key_type, key_hash, name, scopes, meta, expires_at)
|
||||
VALUES (
|
||||
(p_request->>'user_id')::INTEGER,
|
||||
p_request->>'key_type',
|
||||
p_request->>'key_hash',
|
||||
COALESCE(p_request->>'name', ''),
|
||||
p_request->>'scopes',
|
||||
p_request->'meta',
|
||||
CASE WHEN p_request->>'expires_at' IS NOT NULL
|
||||
THEN (p_request->>'expires_at')::TIMESTAMP
|
||||
ELSE NULL
|
||||
END
|
||||
)
|
||||
RETURNING id, created_at INTO v_id, v_created_at;
|
||||
|
||||
v_key := jsonb_build_object(
|
||||
'id', v_id,
|
||||
'user_id', (p_request->>'user_id')::INTEGER,
|
||||
'key_type', p_request->>'key_type',
|
||||
'name', COALESCE(p_request->>'name', ''),
|
||||
'scopes', CASE WHEN p_request->>'scopes' IS NOT NULL
|
||||
THEN (p_request->>'scopes')::jsonb
|
||||
ELSE '[]'::jsonb END,
|
||||
'meta', COALESCE(p_request->'meta', '{}'::jsonb),
|
||||
'expires_at', p_request->>'expires_at',
|
||||
'created_at', v_created_at,
|
||||
'is_active', true
|
||||
);
|
||||
|
||||
RETURN QUERY SELECT true, NULL::TEXT, v_key;
|
||||
EXCEPTION WHEN OTHERS THEN
|
||||
RETURN QUERY SELECT false, SQLERRM, NULL::JSONB;
|
||||
END;
|
||||
$$;
|
||||
|
||||
-- resolvespec_keystore_delete_key
|
||||
-- Soft-deletes a key (is_active = false) after verifying ownership.
|
||||
-- Returns p_key_hash so the caller can invalidate cache entries without a separate query.
|
||||
CREATE OR REPLACE FUNCTION resolvespec_keystore_delete_key(
|
||||
p_user_id INTEGER,
|
||||
p_key_id BIGINT
|
||||
)
|
||||
RETURNS TABLE(p_success BOOLEAN, p_error TEXT, p_key_hash TEXT)
|
||||
LANGUAGE plpgsql AS $$
|
||||
DECLARE
|
||||
v_hash TEXT;
|
||||
BEGIN
|
||||
UPDATE user_keys
|
||||
SET is_active = false
|
||||
WHERE id = p_key_id AND user_id = p_user_id AND is_active = true
|
||||
RETURNING key_hash INTO v_hash;
|
||||
|
||||
IF NOT FOUND THEN
|
||||
RETURN QUERY SELECT false, 'key not found or already deleted'::TEXT, NULL::TEXT;
|
||||
RETURN;
|
||||
END IF;
|
||||
|
||||
RETURN QUERY SELECT true, NULL::TEXT, v_hash;
|
||||
EXCEPTION WHEN OTHERS THEN
|
||||
RETURN QUERY SELECT false, SQLERRM, NULL::TEXT;
|
||||
END;
|
||||
$$;
|
||||
|
||||
-- resolvespec_keystore_validate_key
|
||||
-- Looks up a key by its SHA-256 hash, checks active status and expiry,
|
||||
-- updates last_used_at, and returns the key record.
|
||||
-- p_key_type can be empty to accept any key type.
|
||||
CREATE OR REPLACE FUNCTION resolvespec_keystore_validate_key(
|
||||
p_key_hash TEXT,
|
||||
p_key_type TEXT DEFAULT ''
|
||||
)
|
||||
RETURNS TABLE(p_success BOOLEAN, p_error TEXT, p_key JSONB)
|
||||
LANGUAGE plpgsql AS $$
|
||||
DECLARE
|
||||
v_key_rec user_keys%ROWTYPE;
|
||||
v_key JSONB;
|
||||
BEGIN
|
||||
SELECT * INTO v_key_rec
|
||||
FROM user_keys
|
||||
WHERE key_hash = p_key_hash
|
||||
AND is_active = true
|
||||
AND (expires_at IS NULL OR expires_at > NOW())
|
||||
AND (p_key_type = '' OR key_type = p_key_type);
|
||||
|
||||
IF NOT FOUND THEN
|
||||
RETURN QUERY SELECT false, 'invalid or expired key'::TEXT, NULL::JSONB;
|
||||
RETURN;
|
||||
END IF;
|
||||
|
||||
UPDATE user_keys SET last_used_at = NOW() WHERE id = v_key_rec.id;
|
||||
|
||||
v_key := jsonb_build_object(
|
||||
'id', v_key_rec.id,
|
||||
'user_id', v_key_rec.user_id,
|
||||
'key_type', v_key_rec.key_type,
|
||||
'name', v_key_rec.name,
|
||||
'scopes', CASE WHEN v_key_rec.scopes IS NOT NULL
|
||||
THEN v_key_rec.scopes::jsonb
|
||||
ELSE '[]'::jsonb END,
|
||||
'meta', COALESCE(v_key_rec.meta, '{}'::jsonb),
|
||||
'expires_at', v_key_rec.expires_at,
|
||||
'created_at', v_key_rec.created_at,
|
||||
'last_used_at', NOW(),
|
||||
'is_active', v_key_rec.is_active
|
||||
);
|
||||
|
||||
RETURN QUERY SELECT true, NULL::TEXT, v_key;
|
||||
EXCEPTION WHEN OTHERS THEN
|
||||
RETURN QUERY SELECT false, SQLERRM, NULL::JSONB;
|
||||
END;
|
||||
$$;
|
||||
61
pkg/security/keystore_sql_names.go
Normal file
61
pkg/security/keystore_sql_names.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package security
|
||||
|
||||
import "fmt"
|
||||
|
||||
// KeyStoreSQLNames holds the configurable stored procedure names used by DatabaseKeyStore.
|
||||
// Use DefaultKeyStoreSQLNames() for defaults and MergeKeyStoreSQLNames() for partial overrides.
|
||||
type KeyStoreSQLNames struct {
|
||||
GetUserKeys string // default: "resolvespec_keystore_get_user_keys"
|
||||
CreateKey string // default: "resolvespec_keystore_create_key"
|
||||
DeleteKey string // default: "resolvespec_keystore_delete_key"
|
||||
ValidateKey string // default: "resolvespec_keystore_validate_key"
|
||||
}
|
||||
|
||||
// DefaultKeyStoreSQLNames returns a KeyStoreSQLNames with all default resolvespec_keystore_* values.
|
||||
func DefaultKeyStoreSQLNames() *KeyStoreSQLNames {
|
||||
return &KeyStoreSQLNames{
|
||||
GetUserKeys: "resolvespec_keystore_get_user_keys",
|
||||
CreateKey: "resolvespec_keystore_create_key",
|
||||
DeleteKey: "resolvespec_keystore_delete_key",
|
||||
ValidateKey: "resolvespec_keystore_validate_key",
|
||||
}
|
||||
}
|
||||
|
||||
// MergeKeyStoreSQLNames returns a copy of base with any non-empty fields from override applied.
|
||||
// If override is nil, a copy of base is returned.
|
||||
func MergeKeyStoreSQLNames(base, override *KeyStoreSQLNames) *KeyStoreSQLNames {
|
||||
if override == nil {
|
||||
copied := *base
|
||||
return &copied
|
||||
}
|
||||
merged := *base
|
||||
if override.GetUserKeys != "" {
|
||||
merged.GetUserKeys = override.GetUserKeys
|
||||
}
|
||||
if override.CreateKey != "" {
|
||||
merged.CreateKey = override.CreateKey
|
||||
}
|
||||
if override.DeleteKey != "" {
|
||||
merged.DeleteKey = override.DeleteKey
|
||||
}
|
||||
if override.ValidateKey != "" {
|
||||
merged.ValidateKey = override.ValidateKey
|
||||
}
|
||||
return &merged
|
||||
}
|
||||
|
||||
// ValidateKeyStoreSQLNames checks that all non-empty procedure names are valid SQL identifiers.
|
||||
func ValidateKeyStoreSQLNames(names *KeyStoreSQLNames) error {
|
||||
fields := map[string]string{
|
||||
"GetUserKeys": names.GetUserKeys,
|
||||
"CreateKey": names.CreateKey,
|
||||
"DeleteKey": names.DeleteKey,
|
||||
"ValidateKey": names.ValidateKey,
|
||||
}
|
||||
for field, val := range fields {
|
||||
if val != "" && !validSQLIdentifier.MatchString(val) {
|
||||
return fmt.Errorf("KeyStoreSQLNames.%s contains invalid characters: %q", field, val)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/modelregistry"
|
||||
)
|
||||
|
||||
// contextKey is a custom type for context keys to avoid collisions
|
||||
@@ -23,6 +25,7 @@ const (
|
||||
UserMetaKey contextKey = "user_meta"
|
||||
SkipAuthKey contextKey = "skip_auth"
|
||||
OptionalAuthKey contextKey = "optional_auth"
|
||||
ModelRulesKey contextKey = "model_rules"
|
||||
)
|
||||
|
||||
// SkipAuth returns a context with skip auth flag set to true
|
||||
@@ -136,6 +139,31 @@ func NewOptionalAuthHandler(securityList *SecurityList, next http.Handler) http.
|
||||
})
|
||||
}
|
||||
|
||||
// NewOptionalAuthMiddleware creates authentication middleware that always continues.
|
||||
// On auth failure, a guest user context is set instead of returning 401.
|
||||
// Intended for spec routes where auth enforcement is deferred to a BeforeHandle hook
|
||||
// after model resolution.
|
||||
func NewOptionalAuthMiddleware(securityList *SecurityList) func(http.Handler) http.Handler {
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
provider := securityList.Provider()
|
||||
if provider == nil {
|
||||
http.Error(w, "Security provider not configured", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
userCtx, err := provider.Authenticate(r)
|
||||
if err != nil {
|
||||
guestCtx := createGuestContext(r)
|
||||
next.ServeHTTP(w, setUserContext(r, guestCtx))
|
||||
return
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, setUserContext(r, userCtx))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// NewAuthMiddleware creates an authentication middleware with the given security list
|
||||
// This middleware extracts user authentication from the request and adds it to context
|
||||
// Routes can skip authentication by setting SkipAuthKey context value (use SkipAuth helper)
|
||||
@@ -182,6 +210,68 @@ func NewAuthMiddleware(securityList *SecurityList) func(http.Handler) http.Handl
|
||||
}
|
||||
}
|
||||
|
||||
// NewModelAuthMiddleware creates authentication middleware that respects ModelRules for the given model name.
|
||||
// It first checks if ModelRules are set for the model:
|
||||
// - If SecurityDisabled is true, authentication is skipped and a guest context is set.
|
||||
// - Otherwise, all checks from NewAuthMiddleware apply (SkipAuthKey, provider check, OptionalAuthKey, Authenticate).
|
||||
//
|
||||
// If the model is not found in any registry, the middleware falls back to standard NewAuthMiddleware behaviour.
|
||||
func NewModelAuthMiddleware(securityList *SecurityList, modelName string) func(http.Handler) http.Handler {
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check ModelRules first
|
||||
if rules, err := modelregistry.GetModelRulesByName(modelName); err == nil {
|
||||
// Store rules in context for downstream use (e.g., security hooks)
|
||||
r = r.WithContext(context.WithValue(r.Context(), ModelRulesKey, rules))
|
||||
|
||||
if rules.SecurityDisabled {
|
||||
guestCtx := createGuestContext(r)
|
||||
next.ServeHTTP(w, setUserContext(r, guestCtx))
|
||||
return
|
||||
}
|
||||
isRead := r.Method == http.MethodGet || r.Method == http.MethodHead
|
||||
isUpdate := r.Method == http.MethodPut || r.Method == http.MethodPatch
|
||||
if (isRead && rules.CanPublicRead) || (isUpdate && rules.CanPublicUpdate) {
|
||||
guestCtx := createGuestContext(r)
|
||||
next.ServeHTTP(w, setUserContext(r, guestCtx))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Check if this route should skip authentication
|
||||
if skip, ok := r.Context().Value(SkipAuthKey).(bool); ok && skip {
|
||||
guestCtx := createGuestContext(r)
|
||||
next.ServeHTTP(w, setUserContext(r, guestCtx))
|
||||
return
|
||||
}
|
||||
|
||||
// Get the security provider
|
||||
provider := securityList.Provider()
|
||||
if provider == nil {
|
||||
http.Error(w, "Security provider not configured", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if this route has optional authentication
|
||||
optional, _ := r.Context().Value(OptionalAuthKey).(bool)
|
||||
|
||||
// Try to authenticate
|
||||
userCtx, err := provider.Authenticate(r)
|
||||
if err != nil {
|
||||
if optional {
|
||||
guestCtx := createGuestContext(r)
|
||||
next.ServeHTTP(w, setUserContext(r, guestCtx))
|
||||
return
|
||||
}
|
||||
http.Error(w, "Authentication failed: "+err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, setUserContext(r, userCtx))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// SetSecurityMiddleware adds security context to requests
|
||||
// This middleware should be applied after AuthMiddleware
|
||||
func SetSecurityMiddleware(securityList *SecurityList) func(http.Handler) http.Handler {
|
||||
@@ -366,6 +456,131 @@ func GetUserMeta(ctx context.Context) (map[string]any, bool) {
|
||||
return meta, ok
|
||||
}
|
||||
|
||||
// SessionCookieOptions configures the session cookie set by SetSessionCookie.
|
||||
// All fields are optional; sensible secure defaults are applied when omitted.
|
||||
type SessionCookieOptions struct {
|
||||
// Name is the cookie name. Defaults to "session_token".
|
||||
Name string
|
||||
// Path is the cookie path. Defaults to "/".
|
||||
Path string
|
||||
// Domain restricts the cookie to a specific domain. Empty means current host.
|
||||
Domain string
|
||||
// Secure sets the Secure flag. Defaults to true.
|
||||
// Set to false only in local development over HTTP.
|
||||
Secure *bool
|
||||
// SameSite sets the SameSite policy. Defaults to http.SameSiteLaxMode.
|
||||
SameSite http.SameSite
|
||||
}
|
||||
|
||||
func (o SessionCookieOptions) name() string {
|
||||
if o.Name != "" {
|
||||
return o.Name
|
||||
}
|
||||
return "session_token"
|
||||
}
|
||||
|
||||
func (o SessionCookieOptions) path() string {
|
||||
if o.Path != "" {
|
||||
return o.Path
|
||||
}
|
||||
return "/"
|
||||
}
|
||||
|
||||
func (o SessionCookieOptions) secure() bool {
|
||||
if o.Secure != nil {
|
||||
return *o.Secure
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (o SessionCookieOptions) sameSite() http.SameSite {
|
||||
if o.SameSite != 0 {
|
||||
return o.SameSite
|
||||
}
|
||||
return http.SameSiteLaxMode
|
||||
}
|
||||
|
||||
// SetSessionCookie writes the session_token cookie to the response after a successful login.
|
||||
// Call this immediately after a successful Authenticator.Login() call.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// resp, err := auth.Login(r.Context(), req)
|
||||
// if err != nil { ... }
|
||||
// security.SetSessionCookie(w, resp)
|
||||
// json.NewEncoder(w).Encode(resp)
|
||||
func SetSessionCookie(w http.ResponseWriter, loginResp *LoginResponse, opts ...SessionCookieOptions) {
|
||||
var o SessionCookieOptions
|
||||
if len(opts) > 0 {
|
||||
o = opts[0]
|
||||
}
|
||||
|
||||
maxAge := 0
|
||||
if loginResp.ExpiresIn > 0 {
|
||||
maxAge = int(loginResp.ExpiresIn)
|
||||
}
|
||||
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: o.name(),
|
||||
Value: loginResp.Token,
|
||||
Path: o.path(),
|
||||
Domain: o.Domain,
|
||||
MaxAge: maxAge,
|
||||
HttpOnly: true,
|
||||
Secure: o.secure(),
|
||||
SameSite: o.sameSite(),
|
||||
})
|
||||
}
|
||||
|
||||
// GetSessionCookie returns the session token value from the request cookie, or empty string if not present.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// token := security.GetSessionCookie(r)
|
||||
func GetSessionCookie(r *http.Request, opts ...SessionCookieOptions) string {
|
||||
var o SessionCookieOptions
|
||||
if len(opts) > 0 {
|
||||
o = opts[0]
|
||||
}
|
||||
cookie, err := r.Cookie(o.name())
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return cookie.Value
|
||||
}
|
||||
|
||||
// ClearSessionCookie expires the session_token cookie, effectively logging the user out on the browser side.
|
||||
// Call this after a successful Authenticator.Logout() call.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// err := auth.Logout(r.Context(), req)
|
||||
// if err != nil { ... }
|
||||
// security.ClearSessionCookie(w)
|
||||
func ClearSessionCookie(w http.ResponseWriter, opts ...SessionCookieOptions) {
|
||||
var o SessionCookieOptions
|
||||
if len(opts) > 0 {
|
||||
o = opts[0]
|
||||
}
|
||||
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: o.name(),
|
||||
Value: "",
|
||||
Path: o.path(),
|
||||
Domain: o.Domain,
|
||||
MaxAge: -1,
|
||||
HttpOnly: true,
|
||||
Secure: o.secure(),
|
||||
SameSite: o.sameSite(),
|
||||
})
|
||||
}
|
||||
|
||||
// GetModelRulesFromContext extracts ModelRules stored by NewModelAuthMiddleware
|
||||
func GetModelRulesFromContext(ctx context.Context) (modelregistry.ModelRules, bool) {
|
||||
rules, ok := ctx.Value(ModelRulesKey).(modelregistry.ModelRules)
|
||||
return rules, ok
|
||||
}
|
||||
|
||||
// // Handler adapters for resolvespec/restheadspec compatibility
|
||||
// // These functions allow using NewAuthHandler and NewOptionalAuthHandler with custom handler abstractions
|
||||
|
||||
|
||||
615
pkg/security/oauth2_examples.go
Normal file
615
pkg/security/oauth2_examples.go
Normal file
@@ -0,0 +1,615 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
// Example: OAuth2 Authentication with Google
|
||||
func ExampleOAuth2Google() {
|
||||
db, _ := sql.Open("postgres", "connection-string")
|
||||
|
||||
// Create OAuth2 authenticator for Google
|
||||
oauth2Auth := NewGoogleAuthenticator(
|
||||
"your-client-id",
|
||||
"your-client-secret",
|
||||
"http://localhost:8080/auth/google/callback",
|
||||
db,
|
||||
)
|
||||
|
||||
router := mux.NewRouter()
|
||||
|
||||
// Login endpoint - redirects to Google
|
||||
router.HandleFunc("/auth/google/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
state, _ := oauth2Auth.OAuth2GenerateState()
|
||||
authURL, _ := oauth2Auth.OAuth2GetAuthURL("google", state)
|
||||
http.Redirect(w, r, authURL, http.StatusTemporaryRedirect)
|
||||
})
|
||||
|
||||
// Callback endpoint - handles Google response
|
||||
router.HandleFunc("/auth/google/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
code := r.URL.Query().Get("code")
|
||||
state := r.URL.Query().Get("state")
|
||||
|
||||
loginResp, err := oauth2Auth.OAuth2HandleCallback(r.Context(), "google", code, state)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
// Set session cookie
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: "session_token",
|
||||
Value: loginResp.Token,
|
||||
Path: "/",
|
||||
MaxAge: int(loginResp.ExpiresIn),
|
||||
HttpOnly: true,
|
||||
Secure: true,
|
||||
SameSite: http.SameSiteLaxMode,
|
||||
})
|
||||
|
||||
// Return user info as JSON
|
||||
_ = json.NewEncoder(w).Encode(loginResp)
|
||||
})
|
||||
|
||||
_ = http.ListenAndServe(":8080", router)
|
||||
}
|
||||
|
||||
// Example: OAuth2 Authentication with GitHub
|
||||
func ExampleOAuth2GitHub() {
|
||||
db, _ := sql.Open("postgres", "connection-string")
|
||||
|
||||
oauth2Auth := NewGitHubAuthenticator(
|
||||
"your-github-client-id",
|
||||
"your-github-client-secret",
|
||||
"http://localhost:8080/auth/github/callback",
|
||||
db,
|
||||
)
|
||||
|
||||
router := mux.NewRouter()
|
||||
|
||||
router.HandleFunc("/auth/github/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
state, _ := oauth2Auth.OAuth2GenerateState()
|
||||
authURL, _ := oauth2Auth.OAuth2GetAuthURL("github", state)
|
||||
http.Redirect(w, r, authURL, http.StatusTemporaryRedirect)
|
||||
})
|
||||
|
||||
router.HandleFunc("/auth/github/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
code := r.URL.Query().Get("code")
|
||||
state := r.URL.Query().Get("state")
|
||||
|
||||
loginResp, err := oauth2Auth.OAuth2HandleCallback(r.Context(), "github", code, state)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
_ = json.NewEncoder(w).Encode(loginResp)
|
||||
})
|
||||
|
||||
_ = http.ListenAndServe(":8080", router)
|
||||
}
|
||||
|
||||
// Example: Custom OAuth2 Provider
|
||||
func ExampleOAuth2Custom() {
|
||||
db, _ := sql.Open("postgres", "connection-string")
|
||||
|
||||
// Custom OAuth2 provider configuration
|
||||
oauth2Auth := NewDatabaseAuthenticator(db).WithOAuth2(OAuth2Config{
|
||||
ClientID: "your-client-id",
|
||||
ClientSecret: "your-client-secret",
|
||||
RedirectURL: "http://localhost:8080/auth/callback",
|
||||
Scopes: []string{"openid", "profile", "email"},
|
||||
AuthURL: "https://your-provider.com/oauth/authorize",
|
||||
TokenURL: "https://your-provider.com/oauth/token",
|
||||
UserInfoURL: "https://your-provider.com/oauth/userinfo",
|
||||
ProviderName: "custom-provider",
|
||||
|
||||
// Custom user info parser
|
||||
UserInfoParser: func(userInfo map[string]any) (*UserContext, error) {
|
||||
// Extract custom fields from your provider
|
||||
return &UserContext{
|
||||
UserName: userInfo["username"].(string),
|
||||
Email: userInfo["email"].(string),
|
||||
RemoteID: userInfo["id"].(string),
|
||||
UserLevel: 1,
|
||||
Roles: []string{"user"},
|
||||
Claims: userInfo,
|
||||
}, nil
|
||||
},
|
||||
})
|
||||
|
||||
router := mux.NewRouter()
|
||||
|
||||
router.HandleFunc("/auth/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
state, _ := oauth2Auth.OAuth2GenerateState()
|
||||
authURL, _ := oauth2Auth.OAuth2GetAuthURL("custom-provider", state)
|
||||
http.Redirect(w, r, authURL, http.StatusTemporaryRedirect)
|
||||
})
|
||||
|
||||
router.HandleFunc("/auth/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
code := r.URL.Query().Get("code")
|
||||
state := r.URL.Query().Get("state")
|
||||
|
||||
loginResp, err := oauth2Auth.OAuth2HandleCallback(r.Context(), "custom-provider", code, state)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
_ = json.NewEncoder(w).Encode(loginResp)
|
||||
})
|
||||
|
||||
_ = http.ListenAndServe(":8080", router)
|
||||
}
|
||||
|
||||
// Example: Multi-Provider OAuth2 with Security Integration
|
||||
func ExampleOAuth2MultiProvider() {
|
||||
db, _ := sql.Open("postgres", "connection-string")
|
||||
|
||||
// Create OAuth2 authenticators for multiple providers
|
||||
googleAuth := NewGoogleAuthenticator(
|
||||
"google-client-id",
|
||||
"google-client-secret",
|
||||
"http://localhost:8080/auth/google/callback",
|
||||
db,
|
||||
)
|
||||
|
||||
githubAuth := NewGitHubAuthenticator(
|
||||
"github-client-id",
|
||||
"github-client-secret",
|
||||
"http://localhost:8080/auth/github/callback",
|
||||
db,
|
||||
)
|
||||
|
||||
// Create column and row security providers
|
||||
colSec := NewDatabaseColumnSecurityProvider(db)
|
||||
rowSec := NewDatabaseRowSecurityProvider(db)
|
||||
|
||||
router := mux.NewRouter()
|
||||
|
||||
// Google OAuth2 routes
|
||||
router.HandleFunc("/auth/google/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
state, _ := googleAuth.OAuth2GenerateState()
|
||||
authURL, _ := googleAuth.OAuth2GetAuthURL("google", state)
|
||||
http.Redirect(w, r, authURL, http.StatusTemporaryRedirect)
|
||||
})
|
||||
|
||||
router.HandleFunc("/auth/google/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
code := r.URL.Query().Get("code")
|
||||
state := r.URL.Query().Get("state")
|
||||
|
||||
loginResp, err := googleAuth.OAuth2HandleCallback(r.Context(), "google", code, state)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: "session_token",
|
||||
Value: loginResp.Token,
|
||||
Path: "/",
|
||||
MaxAge: int(loginResp.ExpiresIn),
|
||||
HttpOnly: true,
|
||||
})
|
||||
|
||||
http.Redirect(w, r, "/dashboard", http.StatusTemporaryRedirect)
|
||||
})
|
||||
|
||||
// GitHub OAuth2 routes
|
||||
router.HandleFunc("/auth/github/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
state, _ := githubAuth.OAuth2GenerateState()
|
||||
authURL, _ := githubAuth.OAuth2GetAuthURL("github", state)
|
||||
http.Redirect(w, r, authURL, http.StatusTemporaryRedirect)
|
||||
})
|
||||
|
||||
router.HandleFunc("/auth/github/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
code := r.URL.Query().Get("code")
|
||||
state := r.URL.Query().Get("state")
|
||||
|
||||
loginResp, err := githubAuth.OAuth2HandleCallback(r.Context(), "github", code, state)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: "session_token",
|
||||
Value: loginResp.Token,
|
||||
Path: "/",
|
||||
MaxAge: int(loginResp.ExpiresIn),
|
||||
HttpOnly: true,
|
||||
})
|
||||
|
||||
http.Redirect(w, r, "/dashboard", http.StatusTemporaryRedirect)
|
||||
})
|
||||
|
||||
// Use Google auth for protected routes (or GitHub - both work)
|
||||
provider, _ := NewCompositeSecurityProvider(googleAuth, colSec, rowSec)
|
||||
securityList, _ := NewSecurityList(provider)
|
||||
|
||||
// Protected route with authentication
|
||||
protectedRouter := router.PathPrefix("/api").Subrouter()
|
||||
protectedRouter.Use(NewAuthMiddleware(securityList))
|
||||
protectedRouter.Use(SetSecurityMiddleware(securityList))
|
||||
|
||||
protectedRouter.HandleFunc("/profile", func(w http.ResponseWriter, r *http.Request) {
|
||||
userCtx, _ := GetUserContext(r.Context())
|
||||
_ = json.NewEncoder(w).Encode(userCtx)
|
||||
})
|
||||
|
||||
_ = http.ListenAndServe(":8080", router)
|
||||
}
|
||||
|
||||
// Example: OAuth2 with Token Refresh
|
||||
func ExampleOAuth2TokenRefresh() {
|
||||
db, _ := sql.Open("postgres", "connection-string")
|
||||
|
||||
oauth2Auth := NewGoogleAuthenticator(
|
||||
"your-client-id",
|
||||
"your-client-secret",
|
||||
"http://localhost:8080/auth/google/callback",
|
||||
db,
|
||||
)
|
||||
|
||||
router := mux.NewRouter()
|
||||
|
||||
// Refresh token endpoint
|
||||
router.HandleFunc("/auth/refresh", func(w http.ResponseWriter, r *http.Request) {
|
||||
var req struct {
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
Provider string `json:"provider"` // "google", "github", etc.
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, "Invalid request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Default to google if not specified
|
||||
if req.Provider == "" {
|
||||
req.Provider = "google"
|
||||
}
|
||||
|
||||
// Use OAuth2-specific refresh method
|
||||
loginResp, err := oauth2Auth.OAuth2RefreshToken(r.Context(), req.RefreshToken, req.Provider)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
// Set new session cookie
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: "session_token",
|
||||
Value: loginResp.Token,
|
||||
Path: "/",
|
||||
MaxAge: int(loginResp.ExpiresIn),
|
||||
HttpOnly: true,
|
||||
Secure: true,
|
||||
SameSite: http.SameSiteLaxMode,
|
||||
})
|
||||
|
||||
_ = json.NewEncoder(w).Encode(loginResp)
|
||||
})
|
||||
|
||||
_ = http.ListenAndServe(":8080", router)
|
||||
}
|
||||
|
||||
// Example: OAuth2 Logout
|
||||
func ExampleOAuth2Logout() {
|
||||
db, _ := sql.Open("postgres", "connection-string")
|
||||
|
||||
oauth2Auth := NewGoogleAuthenticator(
|
||||
"your-client-id",
|
||||
"your-client-secret",
|
||||
"http://localhost:8080/auth/google/callback",
|
||||
db,
|
||||
)
|
||||
|
||||
router := mux.NewRouter()
|
||||
|
||||
router.HandleFunc("/auth/logout", func(w http.ResponseWriter, r *http.Request) {
|
||||
token := r.Header.Get("Authorization")
|
||||
if token == "" {
|
||||
cookie, err := r.Cookie("session_token")
|
||||
if err == nil {
|
||||
token = cookie.Value
|
||||
}
|
||||
}
|
||||
|
||||
if token != "" {
|
||||
// Get user ID from session
|
||||
userCtx, err := oauth2Auth.Authenticate(r)
|
||||
if err == nil {
|
||||
_ = oauth2Auth.Logout(r.Context(), LogoutRequest{
|
||||
Token: token,
|
||||
UserID: userCtx.UserID,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Clear cookie
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: "session_token",
|
||||
Value: "",
|
||||
Path: "/",
|
||||
MaxAge: -1,
|
||||
HttpOnly: true,
|
||||
})
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write([]byte("Logged out successfully"))
|
||||
})
|
||||
|
||||
_ = http.ListenAndServe(":8080", router)
|
||||
}
|
||||
|
||||
// Example: Complete OAuth2 Integration with Database Setup
|
||||
func ExampleOAuth2Complete() {
|
||||
db, _ := sql.Open("postgres", "connection-string")
|
||||
|
||||
// Create tables (run once)
|
||||
setupOAuth2Tables(db)
|
||||
|
||||
// Create OAuth2 authenticator
|
||||
oauth2Auth := NewGoogleAuthenticator(
|
||||
"your-client-id",
|
||||
"your-client-secret",
|
||||
"http://localhost:8080/auth/google/callback",
|
||||
db,
|
||||
)
|
||||
|
||||
// Create security providers
|
||||
colSec := NewDatabaseColumnSecurityProvider(db)
|
||||
rowSec := NewDatabaseRowSecurityProvider(db)
|
||||
provider, _ := NewCompositeSecurityProvider(oauth2Auth, colSec, rowSec)
|
||||
securityList, _ := NewSecurityList(provider)
|
||||
|
||||
router := mux.NewRouter()
|
||||
|
||||
// Public routes
|
||||
router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
_, _ = w.Write([]byte("Welcome! <a href='/auth/google/login'>Login with Google</a>"))
|
||||
})
|
||||
|
||||
router.HandleFunc("/auth/google/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
state, _ := oauth2Auth.OAuth2GenerateState()
|
||||
authURL, _ := oauth2Auth.OAuth2GetAuthURL("github", state)
|
||||
http.Redirect(w, r, authURL, http.StatusTemporaryRedirect)
|
||||
})
|
||||
|
||||
router.HandleFunc("/auth/google/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
code := r.URL.Query().Get("code")
|
||||
state := r.URL.Query().Get("state")
|
||||
|
||||
loginResp, err := oauth2Auth.OAuth2HandleCallback(r.Context(), "github", code, state)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: "session_token",
|
||||
Value: loginResp.Token,
|
||||
Path: "/",
|
||||
MaxAge: int(loginResp.ExpiresIn),
|
||||
HttpOnly: true,
|
||||
})
|
||||
|
||||
http.Redirect(w, r, "/dashboard", http.StatusTemporaryRedirect)
|
||||
})
|
||||
|
||||
// Protected routes
|
||||
protectedRouter := router.PathPrefix("/").Subrouter()
|
||||
protectedRouter.Use(NewAuthMiddleware(securityList))
|
||||
protectedRouter.Use(SetSecurityMiddleware(securityList))
|
||||
|
||||
protectedRouter.HandleFunc("/dashboard", func(w http.ResponseWriter, r *http.Request) {
|
||||
userCtx, _ := GetUserContext(r.Context())
|
||||
_, _ = fmt.Fprintf(w, "Welcome, %s! Your email: %s", userCtx.UserName, userCtx.Email)
|
||||
})
|
||||
|
||||
protectedRouter.HandleFunc("/api/profile", func(w http.ResponseWriter, r *http.Request) {
|
||||
userCtx, _ := GetUserContext(r.Context())
|
||||
_ = json.NewEncoder(w).Encode(userCtx)
|
||||
})
|
||||
|
||||
protectedRouter.HandleFunc("/auth/logout", func(w http.ResponseWriter, r *http.Request) {
|
||||
userCtx, _ := GetUserContext(r.Context())
|
||||
_ = oauth2Auth.Logout(r.Context(), LogoutRequest{
|
||||
Token: userCtx.SessionID,
|
||||
UserID: userCtx.UserID,
|
||||
})
|
||||
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: "session_token",
|
||||
Value: "",
|
||||
Path: "/",
|
||||
MaxAge: -1,
|
||||
HttpOnly: true,
|
||||
})
|
||||
|
||||
http.Redirect(w, r, "/", http.StatusTemporaryRedirect)
|
||||
})
|
||||
|
||||
_ = http.ListenAndServe(":8080", router)
|
||||
}
|
||||
|
||||
func setupOAuth2Tables(db *sql.DB) {
|
||||
// Create tables from database_schema.sql
|
||||
// This is a helper function - in production, use migrations
|
||||
ctx := context.Background()
|
||||
|
||||
// Create users table if not exists
|
||||
_, _ = db.ExecContext(ctx, `
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
id SERIAL PRIMARY KEY,
|
||||
username VARCHAR(255) NOT NULL UNIQUE,
|
||||
email VARCHAR(255) NOT NULL UNIQUE,
|
||||
password VARCHAR(255),
|
||||
user_level INTEGER DEFAULT 0,
|
||||
roles VARCHAR(500),
|
||||
is_active BOOLEAN DEFAULT true,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
last_login_at TIMESTAMP,
|
||||
remote_id VARCHAR(255),
|
||||
auth_provider VARCHAR(50)
|
||||
)
|
||||
`)
|
||||
|
||||
// Create user_sessions table (used for both regular and OAuth2 sessions)
|
||||
_, _ = db.ExecContext(ctx, `
|
||||
CREATE TABLE IF NOT EXISTS user_sessions (
|
||||
id SERIAL PRIMARY KEY,
|
||||
session_token VARCHAR(500) NOT NULL UNIQUE,
|
||||
user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
expires_at TIMESTAMP NOT NULL,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
last_activity_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
ip_address VARCHAR(45),
|
||||
user_agent TEXT,
|
||||
access_token TEXT,
|
||||
refresh_token TEXT,
|
||||
token_type VARCHAR(50) DEFAULT 'Bearer',
|
||||
auth_provider VARCHAR(50)
|
||||
)
|
||||
`)
|
||||
}
|
||||
|
||||
// Example: All OAuth2 Providers at Once
|
||||
func ExampleOAuth2AllProviders() {
|
||||
db, _ := sql.Open("postgres", "connection-string")
|
||||
|
||||
// Create authenticator with ALL OAuth2 providers
|
||||
auth := NewDatabaseAuthenticator(db).
|
||||
WithOAuth2(OAuth2Config{
|
||||
ClientID: "google-client-id",
|
||||
ClientSecret: "google-client-secret",
|
||||
RedirectURL: "http://localhost:8080/auth/google/callback",
|
||||
Scopes: []string{"openid", "profile", "email"},
|
||||
AuthURL: "https://accounts.google.com/o/oauth2/auth",
|
||||
TokenURL: "https://oauth2.googleapis.com/token",
|
||||
UserInfoURL: "https://www.googleapis.com/oauth2/v2/userinfo",
|
||||
ProviderName: "google",
|
||||
}).
|
||||
WithOAuth2(OAuth2Config{
|
||||
ClientID: "github-client-id",
|
||||
ClientSecret: "github-client-secret",
|
||||
RedirectURL: "http://localhost:8080/auth/github/callback",
|
||||
Scopes: []string{"user:email"},
|
||||
AuthURL: "https://github.com/login/oauth/authorize",
|
||||
TokenURL: "https://github.com/login/oauth/access_token",
|
||||
UserInfoURL: "https://api.github.com/user",
|
||||
ProviderName: "github",
|
||||
}).
|
||||
WithOAuth2(OAuth2Config{
|
||||
ClientID: "microsoft-client-id",
|
||||
ClientSecret: "microsoft-client-secret",
|
||||
RedirectURL: "http://localhost:8080/auth/microsoft/callback",
|
||||
Scopes: []string{"openid", "profile", "email"},
|
||||
AuthURL: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
|
||||
TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
|
||||
UserInfoURL: "https://graph.microsoft.com/v1.0/me",
|
||||
ProviderName: "microsoft",
|
||||
}).
|
||||
WithOAuth2(OAuth2Config{
|
||||
ClientID: "facebook-client-id",
|
||||
ClientSecret: "facebook-client-secret",
|
||||
RedirectURL: "http://localhost:8080/auth/facebook/callback",
|
||||
Scopes: []string{"email"},
|
||||
AuthURL: "https://www.facebook.com/v12.0/dialog/oauth",
|
||||
TokenURL: "https://graph.facebook.com/v12.0/oauth/access_token",
|
||||
UserInfoURL: "https://graph.facebook.com/me?fields=id,name,email",
|
||||
ProviderName: "facebook",
|
||||
})
|
||||
|
||||
// Get list of configured providers
|
||||
providers := auth.OAuth2GetProviders()
|
||||
fmt.Printf("Configured OAuth2 providers: %v\n", providers)
|
||||
|
||||
router := mux.NewRouter()
|
||||
|
||||
// Google routes
|
||||
router.HandleFunc("/auth/google/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
state, _ := auth.OAuth2GenerateState()
|
||||
authURL, _ := auth.OAuth2GetAuthURL("google", state)
|
||||
http.Redirect(w, r, authURL, http.StatusTemporaryRedirect)
|
||||
})
|
||||
router.HandleFunc("/auth/google/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
loginResp, err := auth.OAuth2HandleCallback(r.Context(), "google", r.URL.Query().Get("code"), r.URL.Query().Get("state"))
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(loginResp)
|
||||
})
|
||||
|
||||
// GitHub routes
|
||||
router.HandleFunc("/auth/github/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
state, _ := auth.OAuth2GenerateState()
|
||||
authURL, _ := auth.OAuth2GetAuthURL("github", state)
|
||||
http.Redirect(w, r, authURL, http.StatusTemporaryRedirect)
|
||||
})
|
||||
router.HandleFunc("/auth/github/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
loginResp, err := auth.OAuth2HandleCallback(r.Context(), "github", r.URL.Query().Get("code"), r.URL.Query().Get("state"))
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(loginResp)
|
||||
})
|
||||
|
||||
// Microsoft routes
|
||||
router.HandleFunc("/auth/microsoft/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
state, _ := auth.OAuth2GenerateState()
|
||||
authURL, _ := auth.OAuth2GetAuthURL("microsoft", state)
|
||||
http.Redirect(w, r, authURL, http.StatusTemporaryRedirect)
|
||||
})
|
||||
router.HandleFunc("/auth/microsoft/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
loginResp, err := auth.OAuth2HandleCallback(r.Context(), "microsoft", r.URL.Query().Get("code"), r.URL.Query().Get("state"))
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(loginResp)
|
||||
})
|
||||
|
||||
// Facebook routes
|
||||
router.HandleFunc("/auth/facebook/login", func(w http.ResponseWriter, r *http.Request) {
|
||||
state, _ := auth.OAuth2GenerateState()
|
||||
authURL, _ := auth.OAuth2GetAuthURL("facebook", state)
|
||||
http.Redirect(w, r, authURL, http.StatusTemporaryRedirect)
|
||||
})
|
||||
router.HandleFunc("/auth/facebook/callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
loginResp, err := auth.OAuth2HandleCallback(r.Context(), "facebook", r.URL.Query().Get("code"), r.URL.Query().Get("state"))
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(loginResp)
|
||||
})
|
||||
|
||||
// Create security list for protected routes
|
||||
colSec := NewDatabaseColumnSecurityProvider(db)
|
||||
rowSec := NewDatabaseRowSecurityProvider(db)
|
||||
provider, _ := NewCompositeSecurityProvider(auth, colSec, rowSec)
|
||||
securityList, _ := NewSecurityList(provider)
|
||||
|
||||
// Protected routes work for ALL OAuth2 providers + regular sessions
|
||||
protectedRouter := router.PathPrefix("/api").Subrouter()
|
||||
protectedRouter.Use(NewAuthMiddleware(securityList))
|
||||
protectedRouter.Use(SetSecurityMiddleware(securityList))
|
||||
|
||||
protectedRouter.HandleFunc("/profile", func(w http.ResponseWriter, r *http.Request) {
|
||||
userCtx, _ := GetUserContext(r.Context())
|
||||
_ = json.NewEncoder(w).Encode(userCtx)
|
||||
})
|
||||
|
||||
_ = http.ListenAndServe(":8080", router)
|
||||
}
|
||||
579
pkg/security/oauth2_methods.go
Normal file
579
pkg/security/oauth2_methods.go
Normal file
@@ -0,0 +1,579 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"database/sql"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// OAuth2Config contains configuration for OAuth2 authentication
|
||||
type OAuth2Config struct {
|
||||
ClientID string
|
||||
ClientSecret string
|
||||
RedirectURL string
|
||||
Scopes []string
|
||||
AuthURL string
|
||||
TokenURL string
|
||||
UserInfoURL string
|
||||
ProviderName string
|
||||
|
||||
// Optional: Custom user info parser
|
||||
// If not provided, will use standard claims (sub, email, name)
|
||||
UserInfoParser func(userInfo map[string]any) (*UserContext, error)
|
||||
}
|
||||
|
||||
// OAuth2Provider holds configuration and state for a single OAuth2 provider
|
||||
type OAuth2Provider struct {
|
||||
config *oauth2.Config
|
||||
userInfoURL string
|
||||
userInfoParser func(userInfo map[string]any) (*UserContext, error)
|
||||
providerName string
|
||||
states map[string]time.Time // state -> expiry time
|
||||
statesMutex sync.RWMutex
|
||||
}
|
||||
|
||||
// WithOAuth2 configures OAuth2 support for the DatabaseAuthenticator
|
||||
// Can be called multiple times to add multiple OAuth2 providers
|
||||
// Returns the same DatabaseAuthenticator instance for method chaining
|
||||
func (a *DatabaseAuthenticator) WithOAuth2(cfg OAuth2Config) *DatabaseAuthenticator {
|
||||
if cfg.ProviderName == "" {
|
||||
cfg.ProviderName = "oauth2"
|
||||
}
|
||||
|
||||
if cfg.UserInfoParser == nil {
|
||||
cfg.UserInfoParser = defaultOAuth2UserInfoParser
|
||||
}
|
||||
|
||||
provider := &OAuth2Provider{
|
||||
config: &oauth2.Config{
|
||||
ClientID: cfg.ClientID,
|
||||
ClientSecret: cfg.ClientSecret,
|
||||
RedirectURL: cfg.RedirectURL,
|
||||
Scopes: cfg.Scopes,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: cfg.AuthURL,
|
||||
TokenURL: cfg.TokenURL,
|
||||
},
|
||||
},
|
||||
userInfoURL: cfg.UserInfoURL,
|
||||
userInfoParser: cfg.UserInfoParser,
|
||||
providerName: cfg.ProviderName,
|
||||
states: make(map[string]time.Time),
|
||||
}
|
||||
|
||||
// Initialize providers map if needed
|
||||
a.oauth2ProvidersMutex.Lock()
|
||||
if a.oauth2Providers == nil {
|
||||
a.oauth2Providers = make(map[string]*OAuth2Provider)
|
||||
}
|
||||
|
||||
// Register provider
|
||||
a.oauth2Providers[cfg.ProviderName] = provider
|
||||
a.oauth2ProvidersMutex.Unlock()
|
||||
|
||||
// Start state cleanup goroutine for this provider
|
||||
go provider.cleanupStates()
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// OAuth2GetAuthURL returns the OAuth2 authorization URL for redirecting users
|
||||
func (a *DatabaseAuthenticator) OAuth2GetAuthURL(providerName, state string) (string, error) {
|
||||
provider, err := a.getOAuth2Provider(providerName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Store state for validation
|
||||
provider.statesMutex.Lock()
|
||||
provider.states[state] = time.Now().Add(10 * time.Minute)
|
||||
provider.statesMutex.Unlock()
|
||||
|
||||
return provider.config.AuthCodeURL(state), nil
|
||||
}
|
||||
|
||||
// OAuth2GenerateState generates a random state string for CSRF protection
|
||||
func (a *DatabaseAuthenticator) OAuth2GenerateState() (string, error) {
|
||||
b := make([]byte, 32)
|
||||
if _, err := rand.Read(b); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.URLEncoding.EncodeToString(b), nil
|
||||
}
|
||||
|
||||
// OAuth2HandleCallback handles the OAuth2 callback and exchanges code for token
|
||||
func (a *DatabaseAuthenticator) OAuth2HandleCallback(ctx context.Context, providerName, code, state string) (*LoginResponse, error) {
|
||||
provider, err := a.getOAuth2Provider(providerName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Validate state
|
||||
if !provider.validateState(state) {
|
||||
return nil, fmt.Errorf("invalid state parameter")
|
||||
}
|
||||
|
||||
// Exchange code for token
|
||||
token, err := provider.config.Exchange(ctx, code)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to exchange code: %w", err)
|
||||
}
|
||||
|
||||
// Fetch user info
|
||||
client := provider.config.Client(ctx, token)
|
||||
resp, err := client.Get(provider.userInfoURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch user info: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read user info: %w", err)
|
||||
}
|
||||
|
||||
var userInfo map[string]any
|
||||
if err := json.Unmarshal(body, &userInfo); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse user info: %w", err)
|
||||
}
|
||||
|
||||
// Parse user info
|
||||
userCtx, err := provider.userInfoParser(userInfo)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse user context: %w", err)
|
||||
}
|
||||
|
||||
// Get or create user in database
|
||||
userID, err := a.oauth2GetOrCreateUser(ctx, userCtx, providerName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get or create user: %w", err)
|
||||
}
|
||||
userCtx.UserID = userID
|
||||
|
||||
// Create session token
|
||||
sessionToken, err := a.OAuth2GenerateState()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate session token: %w", err)
|
||||
}
|
||||
|
||||
expiresAt := time.Now().Add(24 * time.Hour)
|
||||
if token.Expiry.After(time.Now()) {
|
||||
expiresAt = token.Expiry
|
||||
}
|
||||
|
||||
// Store session in database
|
||||
err = a.oauth2CreateSession(ctx, sessionToken, userCtx.UserID, token, expiresAt, providerName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create session: %w", err)
|
||||
}
|
||||
|
||||
userCtx.SessionID = sessionToken
|
||||
|
||||
return &LoginResponse{
|
||||
Token: sessionToken,
|
||||
RefreshToken: token.RefreshToken,
|
||||
User: userCtx,
|
||||
ExpiresIn: int64(time.Until(expiresAt).Seconds()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// OAuth2GetProviders returns list of configured OAuth2 provider names
|
||||
func (a *DatabaseAuthenticator) OAuth2GetProviders() []string {
|
||||
a.oauth2ProvidersMutex.RLock()
|
||||
defer a.oauth2ProvidersMutex.RUnlock()
|
||||
|
||||
if a.oauth2Providers == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
providers := make([]string, 0, len(a.oauth2Providers))
|
||||
for name := range a.oauth2Providers {
|
||||
providers = append(providers, name)
|
||||
}
|
||||
return providers
|
||||
}
|
||||
|
||||
// getOAuth2Provider retrieves a registered OAuth2 provider by name
|
||||
func (a *DatabaseAuthenticator) getOAuth2Provider(providerName string) (*OAuth2Provider, error) {
|
||||
a.oauth2ProvidersMutex.RLock()
|
||||
defer a.oauth2ProvidersMutex.RUnlock()
|
||||
|
||||
if a.oauth2Providers == nil {
|
||||
return nil, fmt.Errorf("OAuth2 not configured - call WithOAuth2() first")
|
||||
}
|
||||
|
||||
provider, ok := a.oauth2Providers[providerName]
|
||||
if !ok {
|
||||
// Build provider list without calling OAuth2GetProviders to avoid recursion
|
||||
providerNames := make([]string, 0, len(a.oauth2Providers))
|
||||
for name := range a.oauth2Providers {
|
||||
providerNames = append(providerNames, name)
|
||||
}
|
||||
return nil, fmt.Errorf("OAuth2 provider '%s' not found - available providers: %v", providerName, providerNames)
|
||||
}
|
||||
|
||||
return provider, nil
|
||||
}
|
||||
|
||||
// oauth2GetOrCreateUser finds or creates a user based on OAuth2 info using stored procedure
|
||||
func (a *DatabaseAuthenticator) oauth2GetOrCreateUser(ctx context.Context, userCtx *UserContext, providerName string) (int, error) {
|
||||
userData := map[string]interface{}{
|
||||
"username": userCtx.UserName,
|
||||
"email": userCtx.Email,
|
||||
"remote_id": userCtx.RemoteID,
|
||||
"user_level": userCtx.UserLevel,
|
||||
"roles": userCtx.Roles,
|
||||
"auth_provider": providerName,
|
||||
}
|
||||
|
||||
userJSON, err := json.Marshal(userData)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to marshal user data: %w", err)
|
||||
}
|
||||
|
||||
var success bool
|
||||
var errMsg *string
|
||||
var userID *int
|
||||
|
||||
err = a.getDB().QueryRowContext(ctx, fmt.Sprintf(`
|
||||
SELECT p_success, p_error, p_user_id
|
||||
FROM %s($1::jsonb)
|
||||
`, a.sqlNames.OAuthGetOrCreateUser), userJSON).Scan(&success, &errMsg, &userID)
|
||||
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to get or create user: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errMsg != nil {
|
||||
return 0, fmt.Errorf("%s", *errMsg)
|
||||
}
|
||||
return 0, fmt.Errorf("failed to get or create user")
|
||||
}
|
||||
|
||||
if userID == nil {
|
||||
return 0, fmt.Errorf("user ID not returned")
|
||||
}
|
||||
|
||||
return *userID, nil
|
||||
}
|
||||
|
||||
// oauth2CreateSession creates a new OAuth2 session using stored procedure
|
||||
func (a *DatabaseAuthenticator) oauth2CreateSession(ctx context.Context, sessionToken string, userID int, token *oauth2.Token, expiresAt time.Time, providerName string) error {
|
||||
sessionData := map[string]interface{}{
|
||||
"session_token": sessionToken,
|
||||
"user_id": userID,
|
||||
"access_token": token.AccessToken,
|
||||
"refresh_token": token.RefreshToken,
|
||||
"token_type": token.TokenType,
|
||||
"expires_at": expiresAt,
|
||||
"auth_provider": providerName,
|
||||
}
|
||||
|
||||
sessionJSON, err := json.Marshal(sessionData)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal session data: %w", err)
|
||||
}
|
||||
|
||||
var success bool
|
||||
var errMsg *string
|
||||
|
||||
err = a.getDB().QueryRowContext(ctx, fmt.Sprintf(`
|
||||
SELECT p_success, p_error
|
||||
FROM %s($1::jsonb)
|
||||
`, a.sqlNames.OAuthCreateSession), sessionJSON).Scan(&success, &errMsg)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create session: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errMsg != nil {
|
||||
return fmt.Errorf("%s", *errMsg)
|
||||
}
|
||||
return fmt.Errorf("failed to create session")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateState validates state using in-memory storage
|
||||
func (p *OAuth2Provider) validateState(state string) bool {
|
||||
p.statesMutex.Lock()
|
||||
defer p.statesMutex.Unlock()
|
||||
|
||||
expiry, ok := p.states[state]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if time.Now().After(expiry) {
|
||||
delete(p.states, state)
|
||||
return false
|
||||
}
|
||||
|
||||
delete(p.states, state) // One-time use
|
||||
return true
|
||||
}
|
||||
|
||||
// cleanupStates removes expired states periodically
|
||||
func (p *OAuth2Provider) cleanupStates() {
|
||||
ticker := time.NewTicker(5 * time.Minute)
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
p.statesMutex.Lock()
|
||||
now := time.Now()
|
||||
for state, expiry := range p.states {
|
||||
if now.After(expiry) {
|
||||
delete(p.states, state)
|
||||
}
|
||||
}
|
||||
p.statesMutex.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// defaultOAuth2UserInfoParser parses standard OAuth2 user info claims
|
||||
func defaultOAuth2UserInfoParser(userInfo map[string]any) (*UserContext, error) {
|
||||
ctx := &UserContext{
|
||||
Claims: userInfo,
|
||||
Roles: []string{"user"},
|
||||
}
|
||||
|
||||
// Extract standard claims
|
||||
if sub, ok := userInfo["sub"].(string); ok {
|
||||
ctx.RemoteID = sub
|
||||
}
|
||||
if email, ok := userInfo["email"].(string); ok {
|
||||
ctx.Email = email
|
||||
// Use email as username if name not available
|
||||
ctx.UserName = strings.Split(email, "@")[0]
|
||||
}
|
||||
if name, ok := userInfo["name"].(string); ok {
|
||||
ctx.UserName = name
|
||||
}
|
||||
if login, ok := userInfo["login"].(string); ok {
|
||||
ctx.UserName = login // GitHub uses "login"
|
||||
}
|
||||
|
||||
if ctx.UserName == "" {
|
||||
return nil, fmt.Errorf("could not extract username from user info")
|
||||
}
|
||||
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
// OAuth2RefreshToken refreshes an expired OAuth2 access token using the refresh token
|
||||
// Takes the refresh token and returns a new LoginResponse with updated tokens
|
||||
func (a *DatabaseAuthenticator) OAuth2RefreshToken(ctx context.Context, refreshToken, providerName string) (*LoginResponse, error) {
|
||||
provider, err := a.getOAuth2Provider(providerName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get session by refresh token from database
|
||||
var success bool
|
||||
var errMsg *string
|
||||
var sessionData []byte
|
||||
|
||||
err = a.getDB().QueryRowContext(ctx, fmt.Sprintf(`
|
||||
SELECT p_success, p_error, p_data::text
|
||||
FROM %s($1)
|
||||
`, a.sqlNames.OAuthGetRefreshToken), refreshToken).Scan(&success, &errMsg, &sessionData)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get session by refresh token: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errMsg != nil {
|
||||
return nil, fmt.Errorf("%s", *errMsg)
|
||||
}
|
||||
return nil, fmt.Errorf("invalid or expired refresh token")
|
||||
}
|
||||
|
||||
// Parse session data
|
||||
var session struct {
|
||||
UserID int `json:"user_id"`
|
||||
AccessToken string `json:"access_token"`
|
||||
TokenType string `json:"token_type"`
|
||||
Expiry time.Time `json:"expiry"`
|
||||
}
|
||||
if err := json.Unmarshal(sessionData, &session); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse session data: %w", err)
|
||||
}
|
||||
|
||||
// Create oauth2.Token from stored data
|
||||
oldToken := &oauth2.Token{
|
||||
AccessToken: session.AccessToken,
|
||||
TokenType: session.TokenType,
|
||||
RefreshToken: refreshToken,
|
||||
Expiry: session.Expiry,
|
||||
}
|
||||
|
||||
// Use OAuth2 provider to refresh the token
|
||||
tokenSource := provider.config.TokenSource(ctx, oldToken)
|
||||
newToken, err := tokenSource.Token()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to refresh token with provider: %w", err)
|
||||
}
|
||||
|
||||
// Generate new session token
|
||||
newSessionToken, err := a.OAuth2GenerateState()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate new session token: %w", err)
|
||||
}
|
||||
|
||||
// Update session in database with new tokens
|
||||
updateData := map[string]interface{}{
|
||||
"user_id": session.UserID,
|
||||
"old_refresh_token": refreshToken,
|
||||
"new_session_token": newSessionToken,
|
||||
"new_access_token": newToken.AccessToken,
|
||||
"new_refresh_token": newToken.RefreshToken,
|
||||
"expires_at": newToken.Expiry,
|
||||
}
|
||||
|
||||
updateJSON, err := json.Marshal(updateData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal update data: %w", err)
|
||||
}
|
||||
|
||||
var updateSuccess bool
|
||||
var updateErrMsg *string
|
||||
|
||||
err = a.getDB().QueryRowContext(ctx, fmt.Sprintf(`
|
||||
SELECT p_success, p_error
|
||||
FROM %s($1::jsonb)
|
||||
`, a.sqlNames.OAuthUpdateRefreshToken), updateJSON).Scan(&updateSuccess, &updateErrMsg)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update session: %w", err)
|
||||
}
|
||||
|
||||
if !updateSuccess {
|
||||
if updateErrMsg != nil {
|
||||
return nil, fmt.Errorf("%s", *updateErrMsg)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to update session")
|
||||
}
|
||||
|
||||
// Get user data
|
||||
var userSuccess bool
|
||||
var userErrMsg *string
|
||||
var userData []byte
|
||||
|
||||
err = a.getDB().QueryRowContext(ctx, fmt.Sprintf(`
|
||||
SELECT p_success, p_error, p_data::text
|
||||
FROM %s($1)
|
||||
`, a.sqlNames.OAuthGetUser), session.UserID).Scan(&userSuccess, &userErrMsg, &userData)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get user data: %w", err)
|
||||
}
|
||||
|
||||
if !userSuccess {
|
||||
if userErrMsg != nil {
|
||||
return nil, fmt.Errorf("%s", *userErrMsg)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get user data")
|
||||
}
|
||||
|
||||
// Parse user context
|
||||
var userCtx UserContext
|
||||
if err := json.Unmarshal(userData, &userCtx); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse user context: %w", err)
|
||||
}
|
||||
|
||||
userCtx.SessionID = newSessionToken
|
||||
|
||||
return &LoginResponse{
|
||||
Token: newSessionToken,
|
||||
RefreshToken: newToken.RefreshToken,
|
||||
User: &userCtx,
|
||||
ExpiresIn: int64(time.Until(newToken.Expiry).Seconds()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Pre-configured OAuth2 factory methods
|
||||
|
||||
// NewGoogleAuthenticator creates a DatabaseAuthenticator configured for Google OAuth2
|
||||
func NewGoogleAuthenticator(clientID, clientSecret, redirectURL string, db *sql.DB) *DatabaseAuthenticator {
|
||||
auth := NewDatabaseAuthenticator(db)
|
||||
return auth.WithOAuth2(OAuth2Config{
|
||||
ClientID: clientID,
|
||||
ClientSecret: clientSecret,
|
||||
RedirectURL: redirectURL,
|
||||
Scopes: []string{"openid", "profile", "email"},
|
||||
AuthURL: "https://accounts.google.com/o/oauth2/auth",
|
||||
TokenURL: "https://oauth2.googleapis.com/token",
|
||||
UserInfoURL: "https://www.googleapis.com/oauth2/v2/userinfo",
|
||||
ProviderName: "google",
|
||||
})
|
||||
}
|
||||
|
||||
// NewGitHubAuthenticator creates a DatabaseAuthenticator configured for GitHub OAuth2
|
||||
func NewGitHubAuthenticator(clientID, clientSecret, redirectURL string, db *sql.DB) *DatabaseAuthenticator {
|
||||
auth := NewDatabaseAuthenticator(db)
|
||||
return auth.WithOAuth2(OAuth2Config{
|
||||
ClientID: clientID,
|
||||
ClientSecret: clientSecret,
|
||||
RedirectURL: redirectURL,
|
||||
Scopes: []string{"user:email"},
|
||||
AuthURL: "https://github.com/login/oauth/authorize",
|
||||
TokenURL: "https://github.com/login/oauth/access_token",
|
||||
UserInfoURL: "https://api.github.com/user",
|
||||
ProviderName: "github",
|
||||
})
|
||||
}
|
||||
|
||||
// NewMicrosoftAuthenticator creates a DatabaseAuthenticator configured for Microsoft OAuth2
|
||||
func NewMicrosoftAuthenticator(clientID, clientSecret, redirectURL string, db *sql.DB) *DatabaseAuthenticator {
|
||||
auth := NewDatabaseAuthenticator(db)
|
||||
return auth.WithOAuth2(OAuth2Config{
|
||||
ClientID: clientID,
|
||||
ClientSecret: clientSecret,
|
||||
RedirectURL: redirectURL,
|
||||
Scopes: []string{"openid", "profile", "email"},
|
||||
AuthURL: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
|
||||
TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
|
||||
UserInfoURL: "https://graph.microsoft.com/v1.0/me",
|
||||
ProviderName: "microsoft",
|
||||
})
|
||||
}
|
||||
|
||||
// NewFacebookAuthenticator creates a DatabaseAuthenticator configured for Facebook OAuth2
|
||||
func NewFacebookAuthenticator(clientID, clientSecret, redirectURL string, db *sql.DB) *DatabaseAuthenticator {
|
||||
auth := NewDatabaseAuthenticator(db)
|
||||
return auth.WithOAuth2(OAuth2Config{
|
||||
ClientID: clientID,
|
||||
ClientSecret: clientSecret,
|
||||
RedirectURL: redirectURL,
|
||||
Scopes: []string{"email"},
|
||||
AuthURL: "https://www.facebook.com/v12.0/dialog/oauth",
|
||||
TokenURL: "https://graph.facebook.com/v12.0/oauth/access_token",
|
||||
UserInfoURL: "https://graph.facebook.com/me?fields=id,name,email",
|
||||
ProviderName: "facebook",
|
||||
})
|
||||
}
|
||||
|
||||
// NewMultiProviderAuthenticator creates a DatabaseAuthenticator with all major OAuth2 providers configured
|
||||
func NewMultiProviderAuthenticator(db *sql.DB, configs map[string]OAuth2Config) *DatabaseAuthenticator {
|
||||
auth := NewDatabaseAuthenticator(db)
|
||||
|
||||
//nolint:gocritic // OAuth2Config is copied but kept for API simplicity
|
||||
for _, cfg := range configs {
|
||||
auth.WithOAuth2(cfg)
|
||||
}
|
||||
|
||||
return auth
|
||||
}
|
||||
185
pkg/security/passkey.go
Normal file
185
pkg/security/passkey.go
Normal file
@@ -0,0 +1,185 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PasskeyCredential represents a stored WebAuthn/FIDO2 credential
|
||||
type PasskeyCredential struct {
|
||||
ID string `json:"id"`
|
||||
UserID int `json:"user_id"`
|
||||
CredentialID []byte `json:"credential_id"` // Raw credential ID from authenticator
|
||||
PublicKey []byte `json:"public_key"` // COSE public key
|
||||
AttestationType string `json:"attestation_type"` // none, indirect, direct
|
||||
AAGUID []byte `json:"aaguid"` // Authenticator AAGUID
|
||||
SignCount uint32 `json:"sign_count"` // Signature counter
|
||||
CloneWarning bool `json:"clone_warning"` // True if cloning detected
|
||||
Transports []string `json:"transports,omitempty"` // usb, nfc, ble, internal
|
||||
BackupEligible bool `json:"backup_eligible"` // Credential can be backed up
|
||||
BackupState bool `json:"backup_state"` // Credential is currently backed up
|
||||
Name string `json:"name,omitempty"` // User-friendly name
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
LastUsedAt time.Time `json:"last_used_at"`
|
||||
}
|
||||
|
||||
// PasskeyRegistrationOptions contains options for beginning passkey registration
|
||||
type PasskeyRegistrationOptions struct {
|
||||
Challenge []byte `json:"challenge"`
|
||||
RelyingParty PasskeyRelyingParty `json:"rp"`
|
||||
User PasskeyUser `json:"user"`
|
||||
PubKeyCredParams []PasskeyCredentialParam `json:"pubKeyCredParams"`
|
||||
Timeout int64 `json:"timeout,omitempty"` // Milliseconds
|
||||
ExcludeCredentials []PasskeyCredentialDescriptor `json:"excludeCredentials,omitempty"`
|
||||
AuthenticatorSelection *PasskeyAuthenticatorSelection `json:"authenticatorSelection,omitempty"`
|
||||
Attestation string `json:"attestation,omitempty"` // none, indirect, direct, enterprise
|
||||
Extensions map[string]any `json:"extensions,omitempty"`
|
||||
}
|
||||
|
||||
// PasskeyAuthenticationOptions contains options for beginning passkey authentication
|
||||
type PasskeyAuthenticationOptions struct {
|
||||
Challenge []byte `json:"challenge"`
|
||||
Timeout int64 `json:"timeout,omitempty"`
|
||||
RelyingPartyID string `json:"rpId,omitempty"`
|
||||
AllowCredentials []PasskeyCredentialDescriptor `json:"allowCredentials,omitempty"`
|
||||
UserVerification string `json:"userVerification,omitempty"` // required, preferred, discouraged
|
||||
Extensions map[string]any `json:"extensions,omitempty"`
|
||||
}
|
||||
|
||||
// PasskeyRelyingParty identifies the relying party
|
||||
type PasskeyRelyingParty struct {
|
||||
ID string `json:"id"` // Domain (e.g., "example.com")
|
||||
Name string `json:"name"` // Display name
|
||||
}
|
||||
|
||||
// PasskeyUser identifies the user
|
||||
type PasskeyUser struct {
|
||||
ID []byte `json:"id"` // User handle (unique, persistent)
|
||||
Name string `json:"name"` // Username
|
||||
DisplayName string `json:"displayName"` // Display name
|
||||
}
|
||||
|
||||
// PasskeyCredentialParam specifies supported public key algorithm
|
||||
type PasskeyCredentialParam struct {
|
||||
Type string `json:"type"` // "public-key"
|
||||
Alg int `json:"alg"` // COSE algorithm identifier (e.g., -7 for ES256, -257 for RS256)
|
||||
}
|
||||
|
||||
// PasskeyCredentialDescriptor describes a credential
|
||||
type PasskeyCredentialDescriptor struct {
|
||||
Type string `json:"type"` // "public-key"
|
||||
ID []byte `json:"id"` // Credential ID
|
||||
Transports []string `json:"transports,omitempty"` // usb, nfc, ble, internal
|
||||
}
|
||||
|
||||
// PasskeyAuthenticatorSelection specifies authenticator requirements
|
||||
type PasskeyAuthenticatorSelection struct {
|
||||
AuthenticatorAttachment string `json:"authenticatorAttachment,omitempty"` // platform, cross-platform
|
||||
RequireResidentKey bool `json:"requireResidentKey,omitempty"`
|
||||
ResidentKey string `json:"residentKey,omitempty"` // discouraged, preferred, required
|
||||
UserVerification string `json:"userVerification,omitempty"` // required, preferred, discouraged
|
||||
}
|
||||
|
||||
// PasskeyRegistrationResponse contains the client's registration response
|
||||
type PasskeyRegistrationResponse struct {
|
||||
ID string `json:"id"` // Base64URL encoded credential ID
|
||||
RawID []byte `json:"rawId"` // Raw credential ID
|
||||
Type string `json:"type"` // "public-key"
|
||||
Response PasskeyAuthenticatorAttestationResponse `json:"response"`
|
||||
ClientExtensionResults map[string]any `json:"clientExtensionResults,omitempty"`
|
||||
Transports []string `json:"transports,omitempty"`
|
||||
}
|
||||
|
||||
// PasskeyAuthenticatorAttestationResponse contains attestation data
|
||||
type PasskeyAuthenticatorAttestationResponse struct {
|
||||
ClientDataJSON []byte `json:"clientDataJSON"`
|
||||
AttestationObject []byte `json:"attestationObject"`
|
||||
Transports []string `json:"transports,omitempty"`
|
||||
}
|
||||
|
||||
// PasskeyAuthenticationResponse contains the client's authentication response
|
||||
type PasskeyAuthenticationResponse struct {
|
||||
ID string `json:"id"` // Base64URL encoded credential ID
|
||||
RawID []byte `json:"rawId"` // Raw credential ID
|
||||
Type string `json:"type"` // "public-key"
|
||||
Response PasskeyAuthenticatorAssertionResponse `json:"response"`
|
||||
ClientExtensionResults map[string]any `json:"clientExtensionResults,omitempty"`
|
||||
}
|
||||
|
||||
// PasskeyAuthenticatorAssertionResponse contains assertion data
|
||||
type PasskeyAuthenticatorAssertionResponse struct {
|
||||
ClientDataJSON []byte `json:"clientDataJSON"`
|
||||
AuthenticatorData []byte `json:"authenticatorData"`
|
||||
Signature []byte `json:"signature"`
|
||||
UserHandle []byte `json:"userHandle,omitempty"`
|
||||
}
|
||||
|
||||
// PasskeyProvider handles passkey registration and authentication
|
||||
type PasskeyProvider interface {
|
||||
// BeginRegistration creates registration options for a new passkey
|
||||
BeginRegistration(ctx context.Context, userID int, username, displayName string) (*PasskeyRegistrationOptions, error)
|
||||
|
||||
// CompleteRegistration verifies and stores a new passkey credential
|
||||
CompleteRegistration(ctx context.Context, userID int, response PasskeyRegistrationResponse, expectedChallenge []byte) (*PasskeyCredential, error)
|
||||
|
||||
// BeginAuthentication creates authentication options for passkey login
|
||||
BeginAuthentication(ctx context.Context, username string) (*PasskeyAuthenticationOptions, error)
|
||||
|
||||
// CompleteAuthentication verifies a passkey assertion and returns the user
|
||||
CompleteAuthentication(ctx context.Context, response PasskeyAuthenticationResponse, expectedChallenge []byte) (int, error)
|
||||
|
||||
// GetCredentials returns all passkey credentials for a user
|
||||
GetCredentials(ctx context.Context, userID int) ([]PasskeyCredential, error)
|
||||
|
||||
// DeleteCredential removes a passkey credential
|
||||
DeleteCredential(ctx context.Context, userID int, credentialID string) error
|
||||
|
||||
// UpdateCredentialName updates the friendly name of a credential
|
||||
UpdateCredentialName(ctx context.Context, userID int, credentialID string, name string) error
|
||||
}
|
||||
|
||||
// PasskeyLoginRequest contains passkey authentication data
|
||||
type PasskeyLoginRequest struct {
|
||||
Response PasskeyAuthenticationResponse `json:"response"`
|
||||
ExpectedChallenge []byte `json:"expected_challenge"`
|
||||
Claims map[string]any `json:"claims"` // Additional login data
|
||||
}
|
||||
|
||||
// PasskeyRegisterRequest contains passkey registration data
|
||||
type PasskeyRegisterRequest struct {
|
||||
UserID int `json:"user_id"`
|
||||
Response PasskeyRegistrationResponse `json:"response"`
|
||||
ExpectedChallenge []byte `json:"expected_challenge"`
|
||||
CredentialName string `json:"credential_name,omitempty"`
|
||||
}
|
||||
|
||||
// PasskeyBeginRegistrationRequest contains options for starting passkey registration
|
||||
type PasskeyBeginRegistrationRequest struct {
|
||||
UserID int `json:"user_id"`
|
||||
Username string `json:"username"`
|
||||
DisplayName string `json:"display_name"`
|
||||
}
|
||||
|
||||
// PasskeyBeginAuthenticationRequest contains options for starting passkey authentication
|
||||
type PasskeyBeginAuthenticationRequest struct {
|
||||
Username string `json:"username,omitempty"` // Optional for resident key flow
|
||||
}
|
||||
|
||||
// ParsePasskeyRegistrationResponse parses a JSON passkey registration response
|
||||
func ParsePasskeyRegistrationResponse(data []byte) (*PasskeyRegistrationResponse, error) {
|
||||
var response PasskeyRegistrationResponse
|
||||
if err := json.Unmarshal(data, &response); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &response, nil
|
||||
}
|
||||
|
||||
// ParsePasskeyAuthenticationResponse parses a JSON passkey authentication response
|
||||
func ParsePasskeyAuthenticationResponse(data []byte) (*PasskeyAuthenticationResponse, error) {
|
||||
var response PasskeyAuthenticationResponse
|
||||
if err := json.Unmarshal(data, &response); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &response, nil
|
||||
}
|
||||
432
pkg/security/passkey_examples.go
Normal file
432
pkg/security/passkey_examples.go
Normal file
@@ -0,0 +1,432 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// PasskeyAuthenticationExample demonstrates passkey (WebAuthn/FIDO2) authentication
|
||||
func PasskeyAuthenticationExample() {
|
||||
// Setup database connection
|
||||
db, _ := sql.Open("postgres", "postgres://user:pass@localhost/db")
|
||||
|
||||
// Create passkey provider
|
||||
passkeyProvider := NewDatabasePasskeyProvider(db, DatabasePasskeyProviderOptions{
|
||||
RPID: "example.com", // Your domain
|
||||
RPName: "Example Application", // Display name
|
||||
RPOrigin: "https://example.com", // Expected origin
|
||||
Timeout: 60000, // 60 seconds
|
||||
})
|
||||
|
||||
// Create authenticator with passkey support
|
||||
// Option 1: Pass during creation
|
||||
_ = NewDatabaseAuthenticatorWithOptions(db, DatabaseAuthenticatorOptions{
|
||||
PasskeyProvider: passkeyProvider,
|
||||
})
|
||||
|
||||
// Option 2: Use WithPasskey method
|
||||
auth := NewDatabaseAuthenticator(db).WithPasskey(passkeyProvider)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// === REGISTRATION FLOW ===
|
||||
|
||||
// Step 1: Begin registration
|
||||
regOptions, _ := auth.BeginPasskeyRegistration(ctx, PasskeyBeginRegistrationRequest{
|
||||
UserID: 1,
|
||||
Username: "alice",
|
||||
DisplayName: "Alice Smith",
|
||||
})
|
||||
|
||||
// Send regOptions to client as JSON
|
||||
// Client will call navigator.credentials.create() with these options
|
||||
_ = regOptions
|
||||
|
||||
// Step 2: Complete registration (after client returns credential)
|
||||
// This would come from the client's navigator.credentials.create() response
|
||||
clientResponse := PasskeyRegistrationResponse{
|
||||
ID: "base64-credential-id",
|
||||
RawID: []byte("raw-credential-id"),
|
||||
Type: "public-key",
|
||||
Response: PasskeyAuthenticatorAttestationResponse{
|
||||
ClientDataJSON: []byte("..."),
|
||||
AttestationObject: []byte("..."),
|
||||
},
|
||||
Transports: []string{"internal"},
|
||||
}
|
||||
|
||||
credential, _ := auth.CompletePasskeyRegistration(ctx, PasskeyRegisterRequest{
|
||||
UserID: 1,
|
||||
Response: clientResponse,
|
||||
ExpectedChallenge: regOptions.Challenge,
|
||||
CredentialName: "My iPhone",
|
||||
})
|
||||
|
||||
fmt.Printf("Registered credential: %s\n", credential.ID)
|
||||
|
||||
// === AUTHENTICATION FLOW ===
|
||||
|
||||
// Step 1: Begin authentication
|
||||
authOptions, _ := auth.BeginPasskeyAuthentication(ctx, PasskeyBeginAuthenticationRequest{
|
||||
Username: "alice", // Optional - omit for resident key flow
|
||||
})
|
||||
|
||||
// Send authOptions to client as JSON
|
||||
// Client will call navigator.credentials.get() with these options
|
||||
_ = authOptions
|
||||
|
||||
// Step 2: Complete authentication (after client returns assertion)
|
||||
// This would come from the client's navigator.credentials.get() response
|
||||
clientAssertion := PasskeyAuthenticationResponse{
|
||||
ID: "base64-credential-id",
|
||||
RawID: []byte("raw-credential-id"),
|
||||
Type: "public-key",
|
||||
Response: PasskeyAuthenticatorAssertionResponse{
|
||||
ClientDataJSON: []byte("..."),
|
||||
AuthenticatorData: []byte("..."),
|
||||
Signature: []byte("..."),
|
||||
},
|
||||
}
|
||||
|
||||
loginResponse, _ := auth.LoginWithPasskey(ctx, PasskeyLoginRequest{
|
||||
Response: clientAssertion,
|
||||
ExpectedChallenge: authOptions.Challenge,
|
||||
Claims: map[string]any{
|
||||
"ip_address": "192.168.1.1",
|
||||
"user_agent": "Mozilla/5.0...",
|
||||
},
|
||||
})
|
||||
|
||||
fmt.Printf("Logged in user: %s with token: %s\n",
|
||||
loginResponse.User.UserName, loginResponse.Token)
|
||||
|
||||
// === CREDENTIAL MANAGEMENT ===
|
||||
|
||||
// Get all credentials for a user
|
||||
credentials, _ := auth.GetPasskeyCredentials(ctx, 1)
|
||||
for i := range credentials {
|
||||
fmt.Printf("Credential: %s (created: %s, last used: %s)\n",
|
||||
credentials[i].Name, credentials[i].CreatedAt, credentials[i].LastUsedAt)
|
||||
}
|
||||
|
||||
// Update credential name
|
||||
_ = auth.UpdatePasskeyCredentialName(ctx, 1, credential.ID, "My New iPhone")
|
||||
|
||||
// Delete credential
|
||||
_ = auth.DeletePasskeyCredential(ctx, 1, credential.ID)
|
||||
}
|
||||
|
||||
// PasskeyHTTPHandlersExample shows HTTP handlers for passkey authentication
|
||||
func PasskeyHTTPHandlersExample(auth *DatabaseAuthenticator) {
|
||||
// Store challenges in session/cache in production
|
||||
challenges := make(map[string][]byte)
|
||||
|
||||
// Begin registration endpoint
|
||||
http.HandleFunc("/api/passkey/register/begin", func(w http.ResponseWriter, r *http.Request) {
|
||||
var req struct {
|
||||
UserID int `json:"user_id"`
|
||||
Username string `json:"username"`
|
||||
DisplayName string `json:"display_name"`
|
||||
}
|
||||
_ = json.NewDecoder(r.Body).Decode(&req)
|
||||
|
||||
options, err := auth.BeginPasskeyRegistration(r.Context(), PasskeyBeginRegistrationRequest{
|
||||
UserID: req.UserID,
|
||||
Username: req.Username,
|
||||
DisplayName: req.DisplayName,
|
||||
})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Store challenge for verification (use session ID as key in production)
|
||||
sessionID := "session-123"
|
||||
challenges[sessionID] = options.Challenge
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(options)
|
||||
})
|
||||
|
||||
// Complete registration endpoint
|
||||
http.HandleFunc("/api/passkey/register/complete", func(w http.ResponseWriter, r *http.Request) {
|
||||
var req struct {
|
||||
UserID int `json:"user_id"`
|
||||
Response PasskeyRegistrationResponse `json:"response"`
|
||||
CredentialName string `json:"credential_name"`
|
||||
}
|
||||
_ = json.NewDecoder(r.Body).Decode(&req)
|
||||
|
||||
// Get stored challenge (from session in production)
|
||||
sessionID := "session-123"
|
||||
challenge := challenges[sessionID]
|
||||
delete(challenges, sessionID)
|
||||
|
||||
credential, err := auth.CompletePasskeyRegistration(r.Context(), PasskeyRegisterRequest{
|
||||
UserID: req.UserID,
|
||||
Response: req.Response,
|
||||
ExpectedChallenge: challenge,
|
||||
CredentialName: req.CredentialName,
|
||||
})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(credential)
|
||||
})
|
||||
|
||||
// Begin authentication endpoint
|
||||
http.HandleFunc("/api/passkey/login/begin", func(w http.ResponseWriter, r *http.Request) {
|
||||
var req struct {
|
||||
Username string `json:"username"` // Optional
|
||||
}
|
||||
_ = json.NewDecoder(r.Body).Decode(&req)
|
||||
|
||||
options, err := auth.BeginPasskeyAuthentication(r.Context(), PasskeyBeginAuthenticationRequest{
|
||||
Username: req.Username,
|
||||
})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Store challenge for verification (use session ID as key in production)
|
||||
sessionID := "session-456"
|
||||
challenges[sessionID] = options.Challenge
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(options)
|
||||
})
|
||||
|
||||
// Complete authentication endpoint
|
||||
http.HandleFunc("/api/passkey/login/complete", func(w http.ResponseWriter, r *http.Request) {
|
||||
var req struct {
|
||||
Response PasskeyAuthenticationResponse `json:"response"`
|
||||
}
|
||||
_ = json.NewDecoder(r.Body).Decode(&req)
|
||||
|
||||
// Get stored challenge (from session in production)
|
||||
sessionID := "session-456"
|
||||
challenge := challenges[sessionID]
|
||||
delete(challenges, sessionID)
|
||||
|
||||
loginResponse, err := auth.LoginWithPasskey(r.Context(), PasskeyLoginRequest{
|
||||
Response: req.Response,
|
||||
ExpectedChallenge: challenge,
|
||||
Claims: map[string]any{
|
||||
"ip_address": r.RemoteAddr,
|
||||
"user_agent": r.UserAgent(),
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
// Set session cookie
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: "session_token",
|
||||
Value: loginResponse.Token,
|
||||
Path: "/",
|
||||
HttpOnly: true,
|
||||
Secure: true,
|
||||
SameSite: http.SameSiteLaxMode,
|
||||
})
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(loginResponse)
|
||||
})
|
||||
|
||||
// List credentials endpoint
|
||||
http.HandleFunc("/api/passkey/credentials", func(w http.ResponseWriter, r *http.Request) {
|
||||
// Get user from authenticated session
|
||||
userCtx, err := auth.Authenticate(r)
|
||||
if err != nil {
|
||||
http.Error(w, "Unauthorized", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
credentials, err := auth.GetPasskeyCredentials(r.Context(), userCtx.UserID)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(credentials)
|
||||
})
|
||||
|
||||
// Delete credential endpoint
|
||||
http.HandleFunc("/api/passkey/credentials/delete", func(w http.ResponseWriter, r *http.Request) {
|
||||
userCtx, err := auth.Authenticate(r)
|
||||
if err != nil {
|
||||
http.Error(w, "Unauthorized", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
var req struct {
|
||||
CredentialID string `json:"credential_id"`
|
||||
}
|
||||
_ = json.NewDecoder(r.Body).Decode(&req)
|
||||
|
||||
err = auth.DeletePasskeyCredential(r.Context(), userCtx.UserID, req.CredentialID)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
})
|
||||
}
|
||||
|
||||
// PasskeyClientSideExample shows the client-side JavaScript code needed
|
||||
func PasskeyClientSideExample() string {
|
||||
return `
|
||||
// === CLIENT-SIDE JAVASCRIPT FOR PASSKEY AUTHENTICATION ===
|
||||
|
||||
// Helper function to convert base64 to ArrayBuffer
|
||||
function base64ToArrayBuffer(base64) {
|
||||
const binary = atob(base64);
|
||||
const bytes = new Uint8Array(binary.length);
|
||||
for (let i = 0; i < binary.length; i++) {
|
||||
bytes[i] = binary.charCodeAt(i);
|
||||
}
|
||||
return bytes.buffer;
|
||||
}
|
||||
|
||||
// Helper function to convert ArrayBuffer to base64
|
||||
function arrayBufferToBase64(buffer) {
|
||||
const bytes = new Uint8Array(buffer);
|
||||
let binary = '';
|
||||
for (let i = 0; i < bytes.length; i++) {
|
||||
binary += String.fromCharCode(bytes[i]);
|
||||
}
|
||||
return btoa(binary);
|
||||
}
|
||||
|
||||
// === REGISTRATION ===
|
||||
|
||||
async function registerPasskey(userId, username, displayName) {
|
||||
// Step 1: Get registration options from server
|
||||
const optionsResponse = await fetch('/api/passkey/register/begin', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ user_id: userId, username, display_name: displayName })
|
||||
});
|
||||
const options = await optionsResponse.json();
|
||||
|
||||
// Convert base64 strings to ArrayBuffers
|
||||
options.challenge = base64ToArrayBuffer(options.challenge);
|
||||
options.user.id = base64ToArrayBuffer(options.user.id);
|
||||
if (options.excludeCredentials) {
|
||||
options.excludeCredentials = options.excludeCredentials.map(cred => ({
|
||||
...cred,
|
||||
id: base64ToArrayBuffer(cred.id)
|
||||
}));
|
||||
}
|
||||
|
||||
// Step 2: Create credential using WebAuthn API
|
||||
const credential = await navigator.credentials.create({
|
||||
publicKey: options
|
||||
});
|
||||
|
||||
// Step 3: Send credential to server
|
||||
const credentialResponse = {
|
||||
id: credential.id,
|
||||
rawId: arrayBufferToBase64(credential.rawId),
|
||||
type: credential.type,
|
||||
response: {
|
||||
clientDataJSON: arrayBufferToBase64(credential.response.clientDataJSON),
|
||||
attestationObject: arrayBufferToBase64(credential.response.attestationObject)
|
||||
},
|
||||
transports: credential.response.getTransports ? credential.response.getTransports() : []
|
||||
};
|
||||
|
||||
const completeResponse = await fetch('/api/passkey/register/complete', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
user_id: userId,
|
||||
response: credentialResponse,
|
||||
credential_name: 'My Device'
|
||||
})
|
||||
});
|
||||
|
||||
return await completeResponse.json();
|
||||
}
|
||||
|
||||
// === AUTHENTICATION ===
|
||||
|
||||
async function loginWithPasskey(username) {
|
||||
// Step 1: Get authentication options from server
|
||||
const optionsResponse = await fetch('/api/passkey/login/begin', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ username })
|
||||
});
|
||||
const options = await optionsResponse.json();
|
||||
|
||||
// Convert base64 strings to ArrayBuffers
|
||||
options.challenge = base64ToArrayBuffer(options.challenge);
|
||||
if (options.allowCredentials) {
|
||||
options.allowCredentials = options.allowCredentials.map(cred => ({
|
||||
...cred,
|
||||
id: base64ToArrayBuffer(cred.id)
|
||||
}));
|
||||
}
|
||||
|
||||
// Step 2: Get credential using WebAuthn API
|
||||
const credential = await navigator.credentials.get({
|
||||
publicKey: options
|
||||
});
|
||||
|
||||
// Step 3: Send assertion to server
|
||||
const assertionResponse = {
|
||||
id: credential.id,
|
||||
rawId: arrayBufferToBase64(credential.rawId),
|
||||
type: credential.type,
|
||||
response: {
|
||||
clientDataJSON: arrayBufferToBase64(credential.response.clientDataJSON),
|
||||
authenticatorData: arrayBufferToBase64(credential.response.authenticatorData),
|
||||
signature: arrayBufferToBase64(credential.response.signature),
|
||||
userHandle: credential.response.userHandle ? arrayBufferToBase64(credential.response.userHandle) : null
|
||||
}
|
||||
};
|
||||
|
||||
const loginResponse = await fetch('/api/passkey/login/complete', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ response: assertionResponse })
|
||||
});
|
||||
|
||||
return await loginResponse.json();
|
||||
}
|
||||
|
||||
// === USAGE ===
|
||||
|
||||
// Register a new passkey
|
||||
document.getElementById('register-btn').addEventListener('click', async () => {
|
||||
try {
|
||||
const result = await registerPasskey(1, 'alice', 'Alice Smith');
|
||||
console.log('Passkey registered:', result);
|
||||
} catch (error) {
|
||||
console.error('Registration failed:', error);
|
||||
}
|
||||
});
|
||||
|
||||
// Login with passkey
|
||||
document.getElementById('login-btn').addEventListener('click', async () => {
|
||||
try {
|
||||
const result = await loginWithPasskey('alice');
|
||||
console.log('Logged in:', result);
|
||||
} catch (error) {
|
||||
console.error('Login failed:', error);
|
||||
}
|
||||
});
|
||||
`
|
||||
}
|
||||
447
pkg/security/passkey_provider.go
Normal file
447
pkg/security/passkey_provider.go
Normal file
@@ -0,0 +1,447 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"database/sql"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DatabasePasskeyProvider implements PasskeyProvider using database storage
|
||||
// Procedure names are configurable via SQLNames (see DefaultSQLNames for defaults)
|
||||
type DatabasePasskeyProvider struct {
|
||||
db *sql.DB
|
||||
dbMu sync.RWMutex
|
||||
dbFactory func() (*sql.DB, error)
|
||||
rpID string // Relying Party ID (domain)
|
||||
rpName string // Relying Party display name
|
||||
rpOrigin string // Expected origin for WebAuthn
|
||||
timeout int64 // Timeout in milliseconds (default: 60000)
|
||||
sqlNames *SQLNames
|
||||
}
|
||||
|
||||
// DatabasePasskeyProviderOptions configures the passkey provider
|
||||
type DatabasePasskeyProviderOptions struct {
|
||||
// RPID is the Relying Party ID (typically your domain, e.g., "example.com")
|
||||
RPID string
|
||||
// RPName is the display name for your relying party
|
||||
RPName string
|
||||
// RPOrigin is the expected origin (e.g., "https://example.com")
|
||||
RPOrigin string
|
||||
// Timeout is the timeout for operations in milliseconds (default: 60000)
|
||||
Timeout int64
|
||||
// SQLNames provides custom SQL procedure/function names. If nil, uses DefaultSQLNames().
|
||||
SQLNames *SQLNames
|
||||
// DBFactory is called to obtain a fresh *sql.DB when the existing connection is closed.
|
||||
// If nil, reconnection is disabled.
|
||||
DBFactory func() (*sql.DB, error)
|
||||
}
|
||||
|
||||
// NewDatabasePasskeyProvider creates a new database-backed passkey provider
|
||||
func NewDatabasePasskeyProvider(db *sql.DB, opts DatabasePasskeyProviderOptions) *DatabasePasskeyProvider {
|
||||
if opts.Timeout == 0 {
|
||||
opts.Timeout = 60000 // 60 seconds default
|
||||
}
|
||||
|
||||
sqlNames := MergeSQLNames(DefaultSQLNames(), opts.SQLNames)
|
||||
|
||||
return &DatabasePasskeyProvider{
|
||||
db: db,
|
||||
dbFactory: opts.DBFactory,
|
||||
rpID: opts.RPID,
|
||||
rpName: opts.RPName,
|
||||
rpOrigin: opts.RPOrigin,
|
||||
timeout: opts.Timeout,
|
||||
sqlNames: sqlNames,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *DatabasePasskeyProvider) getDB() *sql.DB {
|
||||
p.dbMu.RLock()
|
||||
defer p.dbMu.RUnlock()
|
||||
return p.db
|
||||
}
|
||||
|
||||
func (p *DatabasePasskeyProvider) reconnectDB() error {
|
||||
if p.dbFactory == nil {
|
||||
return fmt.Errorf("no db factory configured for reconnect")
|
||||
}
|
||||
newDB, err := p.dbFactory()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.dbMu.Lock()
|
||||
p.db = newDB
|
||||
p.dbMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// BeginRegistration creates registration options for a new passkey
|
||||
func (p *DatabasePasskeyProvider) BeginRegistration(ctx context.Context, userID int, username, displayName string) (*PasskeyRegistrationOptions, error) {
|
||||
// Generate challenge
|
||||
challenge := make([]byte, 32)
|
||||
if _, err := rand.Read(challenge); err != nil {
|
||||
return nil, fmt.Errorf("failed to generate challenge: %w", err)
|
||||
}
|
||||
|
||||
// Get existing credentials to exclude
|
||||
credentials, err := p.GetCredentials(ctx, userID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get existing credentials: %w", err)
|
||||
}
|
||||
|
||||
excludeCredentials := make([]PasskeyCredentialDescriptor, 0, len(credentials))
|
||||
for i := range credentials {
|
||||
excludeCredentials = append(excludeCredentials, PasskeyCredentialDescriptor{
|
||||
Type: "public-key",
|
||||
ID: credentials[i].CredentialID,
|
||||
Transports: credentials[i].Transports,
|
||||
})
|
||||
}
|
||||
|
||||
// Create user handle (persistent user ID)
|
||||
userHandle := []byte(fmt.Sprintf("user_%d", userID))
|
||||
|
||||
return &PasskeyRegistrationOptions{
|
||||
Challenge: challenge,
|
||||
RelyingParty: PasskeyRelyingParty{
|
||||
ID: p.rpID,
|
||||
Name: p.rpName,
|
||||
},
|
||||
User: PasskeyUser{
|
||||
ID: userHandle,
|
||||
Name: username,
|
||||
DisplayName: displayName,
|
||||
},
|
||||
PubKeyCredParams: []PasskeyCredentialParam{
|
||||
{Type: "public-key", Alg: -7}, // ES256 (ECDSA with SHA-256)
|
||||
{Type: "public-key", Alg: -257}, // RS256 (RSASSA-PKCS1-v1_5 with SHA-256)
|
||||
},
|
||||
Timeout: p.timeout,
|
||||
ExcludeCredentials: excludeCredentials,
|
||||
AuthenticatorSelection: &PasskeyAuthenticatorSelection{
|
||||
RequireResidentKey: false,
|
||||
ResidentKey: "preferred",
|
||||
UserVerification: "preferred",
|
||||
},
|
||||
Attestation: "none",
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CompleteRegistration verifies and stores a new passkey credential
|
||||
// NOTE: This is a simplified implementation. In production, you should use a WebAuthn library
|
||||
// like github.com/go-webauthn/webauthn to properly verify attestation and parse credentials.
|
||||
func (p *DatabasePasskeyProvider) CompleteRegistration(ctx context.Context, userID int, response PasskeyRegistrationResponse, expectedChallenge []byte) (*PasskeyCredential, error) {
|
||||
// TODO: Implement full WebAuthn verification
|
||||
// 1. Verify clientDataJSON contains correct challenge and origin
|
||||
// 2. Parse and verify attestationObject
|
||||
// 3. Extract public key and credential ID
|
||||
// 4. Verify attestation signature (if not "none")
|
||||
|
||||
// For now, this is a placeholder that stores the credential data
|
||||
// In production, you MUST use a proper WebAuthn library
|
||||
|
||||
credData := map[string]any{
|
||||
"user_id": userID,
|
||||
"credential_id": base64.StdEncoding.EncodeToString(response.RawID),
|
||||
"public_key": base64.StdEncoding.EncodeToString(response.Response.AttestationObject),
|
||||
"attestation_type": "none",
|
||||
"sign_count": 0,
|
||||
"transports": response.Transports,
|
||||
"backup_eligible": false,
|
||||
"backup_state": false,
|
||||
"name": "Passkey",
|
||||
}
|
||||
|
||||
credJSON, err := json.Marshal(credData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal credential data: %w", err)
|
||||
}
|
||||
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var credentialID sql.NullInt64
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_credential_id FROM %s($1::jsonb)`, p.sqlNames.PasskeyStoreCredential)
|
||||
err = p.getDB().QueryRowContext(ctx, query, string(credJSON)).Scan(&success, &errorMsg, &credentialID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to store credential: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errorMsg.Valid {
|
||||
return nil, fmt.Errorf("%s", errorMsg.String)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to store credential")
|
||||
}
|
||||
|
||||
return &PasskeyCredential{
|
||||
ID: fmt.Sprintf("%d", credentialID.Int64),
|
||||
UserID: userID,
|
||||
CredentialID: response.RawID,
|
||||
PublicKey: response.Response.AttestationObject,
|
||||
AttestationType: "none",
|
||||
Transports: response.Transports,
|
||||
CreatedAt: time.Now(),
|
||||
LastUsedAt: time.Now(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// BeginAuthentication creates authentication options for passkey login
|
||||
func (p *DatabasePasskeyProvider) BeginAuthentication(ctx context.Context, username string) (*PasskeyAuthenticationOptions, error) {
|
||||
// Generate challenge
|
||||
challenge := make([]byte, 32)
|
||||
if _, err := rand.Read(challenge); err != nil {
|
||||
return nil, fmt.Errorf("failed to generate challenge: %w", err)
|
||||
}
|
||||
|
||||
// If username is provided, get user's credentials
|
||||
var allowCredentials []PasskeyCredentialDescriptor
|
||||
if username != "" {
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var userID sql.NullInt64
|
||||
var credentialsJSON sql.NullString
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_user_id, p_credentials::text FROM %s($1)`, p.sqlNames.PasskeyGetCredsByUsername)
|
||||
err := p.getDB().QueryRowContext(ctx, query, username).Scan(&success, &errorMsg, &userID, &credentialsJSON)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get credentials: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errorMsg.Valid {
|
||||
return nil, fmt.Errorf("%s", errorMsg.String)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get credentials")
|
||||
}
|
||||
|
||||
// Parse credentials
|
||||
var creds []struct {
|
||||
ID string `json:"credential_id"`
|
||||
Transports []string `json:"transports"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(credentialsJSON.String), &creds); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse credentials: %w", err)
|
||||
}
|
||||
|
||||
allowCredentials = make([]PasskeyCredentialDescriptor, 0, len(creds))
|
||||
for _, cred := range creds {
|
||||
credID, err := base64.StdEncoding.DecodeString(cred.ID)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
allowCredentials = append(allowCredentials, PasskeyCredentialDescriptor{
|
||||
Type: "public-key",
|
||||
ID: credID,
|
||||
Transports: cred.Transports,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return &PasskeyAuthenticationOptions{
|
||||
Challenge: challenge,
|
||||
Timeout: p.timeout,
|
||||
RelyingPartyID: p.rpID,
|
||||
AllowCredentials: allowCredentials,
|
||||
UserVerification: "preferred",
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CompleteAuthentication verifies a passkey assertion and returns the user ID
|
||||
// NOTE: This is a simplified implementation. In production, you should use a WebAuthn library
|
||||
// like github.com/go-webauthn/webauthn to properly verify the assertion signature.
|
||||
func (p *DatabasePasskeyProvider) CompleteAuthentication(ctx context.Context, response PasskeyAuthenticationResponse, expectedChallenge []byte) (int, error) {
|
||||
// TODO: Implement full WebAuthn verification
|
||||
// 1. Verify clientDataJSON contains correct challenge and origin
|
||||
// 2. Verify authenticatorData
|
||||
// 3. Verify signature using stored public key
|
||||
// 4. Update sign counter and check for cloning
|
||||
|
||||
// Get credential from database
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var credentialJSON sql.NullString
|
||||
|
||||
runQuery := func() error {
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_credential::text FROM %s($1)`, p.sqlNames.PasskeyGetCredential)
|
||||
return p.getDB().QueryRowContext(ctx, query, response.RawID).Scan(&success, &errorMsg, &credentialJSON)
|
||||
}
|
||||
err := runQuery()
|
||||
if isDBClosed(err) {
|
||||
if reconnErr := p.reconnectDB(); reconnErr == nil {
|
||||
err = runQuery()
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to get credential: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errorMsg.Valid {
|
||||
return 0, fmt.Errorf("%s", errorMsg.String)
|
||||
}
|
||||
return 0, fmt.Errorf("credential not found")
|
||||
}
|
||||
|
||||
// Parse credential
|
||||
var cred struct {
|
||||
UserID int `json:"user_id"`
|
||||
SignCount uint32 `json:"sign_count"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(credentialJSON.String), &cred); err != nil {
|
||||
return 0, fmt.Errorf("failed to parse credential: %w", err)
|
||||
}
|
||||
|
||||
// TODO: Verify signature here
|
||||
// For now, we'll just update the counter as a placeholder
|
||||
|
||||
// Update counter (in production, this should be done after successful verification)
|
||||
newCounter := cred.SignCount + 1
|
||||
var updateSuccess bool
|
||||
var updateError sql.NullString
|
||||
var cloneWarning sql.NullBool
|
||||
|
||||
updateQuery := fmt.Sprintf(`SELECT p_success, p_error, p_clone_warning FROM %s($1, $2)`, p.sqlNames.PasskeyUpdateCounter)
|
||||
err = p.getDB().QueryRowContext(ctx, updateQuery, response.RawID, newCounter).Scan(&updateSuccess, &updateError, &cloneWarning)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to update counter: %w", err)
|
||||
}
|
||||
|
||||
if cloneWarning.Valid && cloneWarning.Bool {
|
||||
return 0, fmt.Errorf("credential cloning detected")
|
||||
}
|
||||
|
||||
return cred.UserID, nil
|
||||
}
|
||||
|
||||
// GetCredentials returns all passkey credentials for a user
|
||||
func (p *DatabasePasskeyProvider) GetCredentials(ctx context.Context, userID int) ([]PasskeyCredential, error) {
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var credentialsJSON sql.NullString
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_credentials::text FROM %s($1)`, p.sqlNames.PasskeyGetUserCredentials)
|
||||
err := p.getDB().QueryRowContext(ctx, query, userID).Scan(&success, &errorMsg, &credentialsJSON)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get credentials: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errorMsg.Valid {
|
||||
return nil, fmt.Errorf("%s", errorMsg.String)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get credentials")
|
||||
}
|
||||
|
||||
// Parse credentials
|
||||
var rawCreds []struct {
|
||||
ID int `json:"id"`
|
||||
UserID int `json:"user_id"`
|
||||
CredentialID string `json:"credential_id"`
|
||||
PublicKey string `json:"public_key"`
|
||||
AttestationType string `json:"attestation_type"`
|
||||
AAGUID string `json:"aaguid"`
|
||||
SignCount uint32 `json:"sign_count"`
|
||||
CloneWarning bool `json:"clone_warning"`
|
||||
Transports []string `json:"transports"`
|
||||
BackupEligible bool `json:"backup_eligible"`
|
||||
BackupState bool `json:"backup_state"`
|
||||
Name string `json:"name"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
LastUsedAt time.Time `json:"last_used_at"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal([]byte(credentialsJSON.String), &rawCreds); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse credentials: %w", err)
|
||||
}
|
||||
|
||||
credentials := make([]PasskeyCredential, 0, len(rawCreds))
|
||||
for i := range rawCreds {
|
||||
raw := rawCreds[i]
|
||||
credID, err := base64.StdEncoding.DecodeString(raw.CredentialID)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
pubKey, err := base64.StdEncoding.DecodeString(raw.PublicKey)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
aaguid, _ := base64.StdEncoding.DecodeString(raw.AAGUID)
|
||||
|
||||
credentials = append(credentials, PasskeyCredential{
|
||||
ID: fmt.Sprintf("%d", raw.ID),
|
||||
UserID: raw.UserID,
|
||||
CredentialID: credID,
|
||||
PublicKey: pubKey,
|
||||
AttestationType: raw.AttestationType,
|
||||
AAGUID: aaguid,
|
||||
SignCount: raw.SignCount,
|
||||
CloneWarning: raw.CloneWarning,
|
||||
Transports: raw.Transports,
|
||||
BackupEligible: raw.BackupEligible,
|
||||
BackupState: raw.BackupState,
|
||||
Name: raw.Name,
|
||||
CreatedAt: raw.CreatedAt,
|
||||
LastUsedAt: raw.LastUsedAt,
|
||||
})
|
||||
}
|
||||
|
||||
return credentials, nil
|
||||
}
|
||||
|
||||
// DeleteCredential removes a passkey credential
|
||||
func (p *DatabasePasskeyProvider) DeleteCredential(ctx context.Context, userID int, credentialID string) error {
|
||||
credID, err := base64.StdEncoding.DecodeString(credentialID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid credential ID: %w", err)
|
||||
}
|
||||
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error FROM %s($1, $2)`, p.sqlNames.PasskeyDeleteCredential)
|
||||
err = p.getDB().QueryRowContext(ctx, query, userID, credID).Scan(&success, &errorMsg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete credential: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errorMsg.Valid {
|
||||
return fmt.Errorf("%s", errorMsg.String)
|
||||
}
|
||||
return fmt.Errorf("failed to delete credential")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateCredentialName updates the friendly name of a credential
|
||||
func (p *DatabasePasskeyProvider) UpdateCredentialName(ctx context.Context, userID int, credentialID string, name string) error {
|
||||
credID, err := base64.StdEncoding.DecodeString(credentialID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid credential ID: %w", err)
|
||||
}
|
||||
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error FROM %s($1, $2, $3)`, p.sqlNames.PasskeyUpdateName)
|
||||
err = p.getDB().QueryRowContext(ctx, query, userID, credID, name).Scan(&success, &errorMsg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update credential name: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errorMsg.Valid {
|
||||
return fmt.Errorf("%s", errorMsg.String)
|
||||
}
|
||||
return fmt.Errorf("failed to update credential name")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
330
pkg/security/passkey_test.go
Normal file
330
pkg/security/passkey_test.go
Normal file
@@ -0,0 +1,330 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"testing"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
)
|
||||
|
||||
func TestDatabasePasskeyProvider_BeginRegistration(t *testing.T) {
|
||||
db, mock, err := sqlmock.New()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create mock db: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
provider := NewDatabasePasskeyProvider(db, DatabasePasskeyProviderOptions{
|
||||
RPID: "example.com",
|
||||
RPName: "Example App",
|
||||
RPOrigin: "https://example.com",
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Mock get credentials query
|
||||
rows := sqlmock.NewRows([]string{"p_success", "p_error", "p_credentials"}).
|
||||
AddRow(true, nil, "[]")
|
||||
mock.ExpectQuery(`SELECT p_success, p_error, p_credentials::text FROM resolvespec_passkey_get_user_credentials`).
|
||||
WithArgs(1).
|
||||
WillReturnRows(rows)
|
||||
|
||||
opts, err := provider.BeginRegistration(ctx, 1, "testuser", "Test User")
|
||||
if err != nil {
|
||||
t.Fatalf("BeginRegistration failed: %v", err)
|
||||
}
|
||||
|
||||
if opts.RelyingParty.ID != "example.com" {
|
||||
t.Errorf("expected RP ID 'example.com', got '%s'", opts.RelyingParty.ID)
|
||||
}
|
||||
|
||||
if opts.User.Name != "testuser" {
|
||||
t.Errorf("expected username 'testuser', got '%s'", opts.User.Name)
|
||||
}
|
||||
|
||||
if len(opts.Challenge) != 32 {
|
||||
t.Errorf("expected challenge length 32, got %d", len(opts.Challenge))
|
||||
}
|
||||
|
||||
if len(opts.PubKeyCredParams) != 2 {
|
||||
t.Errorf("expected 2 credential params, got %d", len(opts.PubKeyCredParams))
|
||||
}
|
||||
|
||||
if err := mock.ExpectationsWereMet(); err != nil {
|
||||
t.Errorf("unfulfilled expectations: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDatabasePasskeyProvider_BeginAuthentication(t *testing.T) {
|
||||
db, mock, err := sqlmock.New()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create mock db: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
provider := NewDatabasePasskeyProvider(db, DatabasePasskeyProviderOptions{
|
||||
RPID: "example.com",
|
||||
RPName: "Example App",
|
||||
RPOrigin: "https://example.com",
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Mock get credentials by username query
|
||||
rows := sqlmock.NewRows([]string{"p_success", "p_error", "p_user_id", "p_credentials"}).
|
||||
AddRow(true, nil, 1, `[{"credential_id":"YWJjZGVm","transports":["internal"]}]`)
|
||||
mock.ExpectQuery(`SELECT p_success, p_error, p_user_id, p_credentials::text FROM resolvespec_passkey_get_credentials_by_username`).
|
||||
WithArgs("testuser").
|
||||
WillReturnRows(rows)
|
||||
|
||||
opts, err := provider.BeginAuthentication(ctx, "testuser")
|
||||
if err != nil {
|
||||
t.Fatalf("BeginAuthentication failed: %v", err)
|
||||
}
|
||||
|
||||
if opts.RelyingPartyID != "example.com" {
|
||||
t.Errorf("expected RP ID 'example.com', got '%s'", opts.RelyingPartyID)
|
||||
}
|
||||
|
||||
if len(opts.Challenge) != 32 {
|
||||
t.Errorf("expected challenge length 32, got %d", len(opts.Challenge))
|
||||
}
|
||||
|
||||
if len(opts.AllowCredentials) != 1 {
|
||||
t.Errorf("expected 1 allowed credential, got %d", len(opts.AllowCredentials))
|
||||
}
|
||||
|
||||
if err := mock.ExpectationsWereMet(); err != nil {
|
||||
t.Errorf("unfulfilled expectations: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDatabasePasskeyProvider_GetCredentials(t *testing.T) {
|
||||
db, mock, err := sqlmock.New()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create mock db: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
provider := NewDatabasePasskeyProvider(db, DatabasePasskeyProviderOptions{
|
||||
RPID: "example.com",
|
||||
RPName: "Example App",
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
credentialsJSON := `[{
|
||||
"id": 1,
|
||||
"user_id": 1,
|
||||
"credential_id": "YWJjZGVmMTIzNDU2",
|
||||
"public_key": "cHVibGlja2V5",
|
||||
"attestation_type": "none",
|
||||
"aaguid": "",
|
||||
"sign_count": 5,
|
||||
"clone_warning": false,
|
||||
"transports": ["internal"],
|
||||
"backup_eligible": true,
|
||||
"backup_state": false,
|
||||
"name": "My Phone",
|
||||
"created_at": "2026-01-01T00:00:00Z",
|
||||
"last_used_at": "2026-01-31T00:00:00Z"
|
||||
}]`
|
||||
|
||||
rows := sqlmock.NewRows([]string{"p_success", "p_error", "p_credentials"}).
|
||||
AddRow(true, nil, credentialsJSON)
|
||||
mock.ExpectQuery(`SELECT p_success, p_error, p_credentials::text FROM resolvespec_passkey_get_user_credentials`).
|
||||
WithArgs(1).
|
||||
WillReturnRows(rows)
|
||||
|
||||
credentials, err := provider.GetCredentials(ctx, 1)
|
||||
if err != nil {
|
||||
t.Fatalf("GetCredentials failed: %v", err)
|
||||
}
|
||||
|
||||
if len(credentials) != 1 {
|
||||
t.Fatalf("expected 1 credential, got %d", len(credentials))
|
||||
}
|
||||
|
||||
cred := credentials[0]
|
||||
if cred.UserID != 1 {
|
||||
t.Errorf("expected user ID 1, got %d", cred.UserID)
|
||||
}
|
||||
if cred.Name != "My Phone" {
|
||||
t.Errorf("expected name 'My Phone', got '%s'", cred.Name)
|
||||
}
|
||||
if cred.SignCount != 5 {
|
||||
t.Errorf("expected sign count 5, got %d", cred.SignCount)
|
||||
}
|
||||
|
||||
if err := mock.ExpectationsWereMet(); err != nil {
|
||||
t.Errorf("unfulfilled expectations: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDatabasePasskeyProvider_DeleteCredential(t *testing.T) {
|
||||
db, mock, err := sqlmock.New()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create mock db: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
provider := NewDatabasePasskeyProvider(db, DatabasePasskeyProviderOptions{
|
||||
RPID: "example.com",
|
||||
RPName: "Example App",
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
rows := sqlmock.NewRows([]string{"p_success", "p_error"}).
|
||||
AddRow(true, nil)
|
||||
mock.ExpectQuery(`SELECT p_success, p_error FROM resolvespec_passkey_delete_credential`).
|
||||
WithArgs(1, sqlmock.AnyArg()).
|
||||
WillReturnRows(rows)
|
||||
|
||||
err = provider.DeleteCredential(ctx, 1, "YWJjZGVmMTIzNDU2")
|
||||
if err != nil {
|
||||
t.Errorf("DeleteCredential failed: %v", err)
|
||||
}
|
||||
|
||||
if err := mock.ExpectationsWereMet(); err != nil {
|
||||
t.Errorf("unfulfilled expectations: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDatabasePasskeyProvider_UpdateCredentialName(t *testing.T) {
|
||||
db, mock, err := sqlmock.New()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create mock db: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
provider := NewDatabasePasskeyProvider(db, DatabasePasskeyProviderOptions{
|
||||
RPID: "example.com",
|
||||
RPName: "Example App",
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
rows := sqlmock.NewRows([]string{"p_success", "p_error"}).
|
||||
AddRow(true, nil)
|
||||
mock.ExpectQuery(`SELECT p_success, p_error FROM resolvespec_passkey_update_name`).
|
||||
WithArgs(1, sqlmock.AnyArg(), "New Name").
|
||||
WillReturnRows(rows)
|
||||
|
||||
err = provider.UpdateCredentialName(ctx, 1, "YWJjZGVmMTIzNDU2", "New Name")
|
||||
if err != nil {
|
||||
t.Errorf("UpdateCredentialName failed: %v", err)
|
||||
}
|
||||
|
||||
if err := mock.ExpectationsWereMet(); err != nil {
|
||||
t.Errorf("unfulfilled expectations: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDatabaseAuthenticator_PasskeyMethods(t *testing.T) {
|
||||
db, mock, err := sqlmock.New()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create mock db: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
passkeyProvider := NewDatabasePasskeyProvider(db, DatabasePasskeyProviderOptions{
|
||||
RPID: "example.com",
|
||||
RPName: "Example App",
|
||||
})
|
||||
|
||||
auth := NewDatabaseAuthenticatorWithOptions(db, DatabaseAuthenticatorOptions{
|
||||
PasskeyProvider: passkeyProvider,
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("BeginPasskeyRegistration", func(t *testing.T) {
|
||||
rows := sqlmock.NewRows([]string{"p_success", "p_error", "p_credentials"}).
|
||||
AddRow(true, nil, "[]")
|
||||
mock.ExpectQuery(`SELECT p_success, p_error, p_credentials::text FROM resolvespec_passkey_get_user_credentials`).
|
||||
WithArgs(1).
|
||||
WillReturnRows(rows)
|
||||
|
||||
opts, err := auth.BeginPasskeyRegistration(ctx, PasskeyBeginRegistrationRequest{
|
||||
UserID: 1,
|
||||
Username: "testuser",
|
||||
DisplayName: "Test User",
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("BeginPasskeyRegistration failed: %v", err)
|
||||
}
|
||||
|
||||
if opts == nil {
|
||||
t.Error("expected options, got nil")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("GetPasskeyCredentials", func(t *testing.T) {
|
||||
rows := sqlmock.NewRows([]string{"p_success", "p_error", "p_credentials"}).
|
||||
AddRow(true, nil, "[]")
|
||||
mock.ExpectQuery(`SELECT p_success, p_error, p_credentials::text FROM resolvespec_passkey_get_user_credentials`).
|
||||
WithArgs(1).
|
||||
WillReturnRows(rows)
|
||||
|
||||
credentials, err := auth.GetPasskeyCredentials(ctx, 1)
|
||||
if err != nil {
|
||||
t.Errorf("GetPasskeyCredentials failed: %v", err)
|
||||
}
|
||||
|
||||
if credentials == nil {
|
||||
t.Error("expected credentials slice, got nil")
|
||||
}
|
||||
})
|
||||
|
||||
if err := mock.ExpectationsWereMet(); err != nil {
|
||||
t.Errorf("unfulfilled expectations: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDatabaseAuthenticator_WithoutPasskey(t *testing.T) {
|
||||
db, _, err := sqlmock.New()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create mock db: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
auth := NewDatabaseAuthenticator(db)
|
||||
ctx := context.Background()
|
||||
|
||||
_, err = auth.BeginPasskeyRegistration(ctx, PasskeyBeginRegistrationRequest{
|
||||
UserID: 1,
|
||||
Username: "testuser",
|
||||
DisplayName: "Test User",
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
t.Error("expected error when passkey provider not configured, got nil")
|
||||
}
|
||||
|
||||
expectedMsg := "passkey provider not configured"
|
||||
if err.Error() != expectedMsg {
|
||||
t.Errorf("expected error '%s', got '%s'", expectedMsg, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestPasskeyProvider_NilDB(t *testing.T) {
|
||||
// This test verifies that the provider can be created with nil DB
|
||||
// but operations will fail. In production, always provide a valid DB.
|
||||
var db *sql.DB
|
||||
provider := NewDatabasePasskeyProvider(db, DatabasePasskeyProviderOptions{
|
||||
RPID: "example.com",
|
||||
RPName: "Example App",
|
||||
})
|
||||
|
||||
if provider == nil {
|
||||
t.Error("expected provider to be created even with nil DB")
|
||||
}
|
||||
|
||||
// Verify that the provider has the correct configuration
|
||||
if provider.rpID != "example.com" {
|
||||
t.Errorf("expected RP ID 'example.com', got '%s'", provider.rpID)
|
||||
}
|
||||
}
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/cache"
|
||||
@@ -57,13 +58,24 @@ func (a *HeaderAuthenticator) Authenticate(r *http.Request) (*UserContext, error
|
||||
|
||||
// DatabaseAuthenticator provides session-based authentication with database storage
|
||||
// All database operations go through stored procedures for security and consistency
|
||||
// Requires stored procedures: resolvespec_login, resolvespec_logout, resolvespec_session,
|
||||
// resolvespec_session_update, resolvespec_refresh_token
|
||||
// Procedure names are configurable via SQLNames (see DefaultSQLNames for defaults)
|
||||
// See database_schema.sql for procedure definitions
|
||||
// Also supports multiple OAuth2 providers configured with WithOAuth2()
|
||||
// Also supports passkey authentication configured with WithPasskey()
|
||||
type DatabaseAuthenticator struct {
|
||||
db *sql.DB
|
||||
dbMu sync.RWMutex
|
||||
dbFactory func() (*sql.DB, error)
|
||||
cache *cache.Cache
|
||||
cacheTTL time.Duration
|
||||
sqlNames *SQLNames
|
||||
|
||||
// OAuth2 providers registry (multiple providers supported)
|
||||
oauth2Providers map[string]*OAuth2Provider
|
||||
oauth2ProvidersMutex sync.RWMutex
|
||||
|
||||
// Passkey provider (optional)
|
||||
passkeyProvider PasskeyProvider
|
||||
}
|
||||
|
||||
// DatabaseAuthenticatorOptions configures the database authenticator
|
||||
@@ -73,6 +85,14 @@ type DatabaseAuthenticatorOptions struct {
|
||||
CacheTTL time.Duration
|
||||
// Cache is an optional cache instance. If nil, uses the default cache
|
||||
Cache *cache.Cache
|
||||
// PasskeyProvider is an optional passkey provider for WebAuthn/FIDO2 authentication
|
||||
PasskeyProvider PasskeyProvider
|
||||
// SQLNames provides custom SQL procedure/function names. If nil, uses DefaultSQLNames().
|
||||
// Partial overrides are supported: only set the fields you want to change.
|
||||
SQLNames *SQLNames
|
||||
// DBFactory is called to obtain a fresh *sql.DB when the existing connection is closed.
|
||||
// If nil, reconnection is disabled.
|
||||
DBFactory func() (*sql.DB, error)
|
||||
}
|
||||
|
||||
func NewDatabaseAuthenticator(db *sql.DB) *DatabaseAuthenticator {
|
||||
@@ -91,13 +111,38 @@ func NewDatabaseAuthenticatorWithOptions(db *sql.DB, opts DatabaseAuthenticatorO
|
||||
cacheInstance = cache.GetDefaultCache()
|
||||
}
|
||||
|
||||
sqlNames := MergeSQLNames(DefaultSQLNames(), opts.SQLNames)
|
||||
|
||||
return &DatabaseAuthenticator{
|
||||
db: db,
|
||||
dbFactory: opts.DBFactory,
|
||||
cache: cacheInstance,
|
||||
cacheTTL: opts.CacheTTL,
|
||||
sqlNames: sqlNames,
|
||||
passkeyProvider: opts.PasskeyProvider,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *DatabaseAuthenticator) getDB() *sql.DB {
|
||||
a.dbMu.RLock()
|
||||
defer a.dbMu.RUnlock()
|
||||
return a.db
|
||||
}
|
||||
|
||||
func (a *DatabaseAuthenticator) reconnectDB() error {
|
||||
if a.dbFactory == nil {
|
||||
return fmt.Errorf("no db factory configured for reconnect")
|
||||
}
|
||||
newDB, err := a.dbFactory()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.dbMu.Lock()
|
||||
a.db = newDB
|
||||
a.dbMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *DatabaseAuthenticator) Login(ctx context.Context, req LoginRequest) (*LoginResponse, error) {
|
||||
// Convert LoginRequest to JSON
|
||||
reqJSON, err := json.Marshal(req)
|
||||
@@ -105,13 +150,20 @@ func (a *DatabaseAuthenticator) Login(ctx context.Context, req LoginRequest) (*L
|
||||
return nil, fmt.Errorf("failed to marshal login request: %w", err)
|
||||
}
|
||||
|
||||
// Call resolvespec_login stored procedure
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var dataJSON sql.NullString
|
||||
|
||||
query := `SELECT p_success, p_error, p_data::text FROM resolvespec_login($1::jsonb)`
|
||||
err = a.db.QueryRowContext(ctx, query, string(reqJSON)).Scan(&success, &errorMsg, &dataJSON)
|
||||
runLoginQuery := func() error {
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_data::text FROM %s($1::jsonb)`, a.sqlNames.Login)
|
||||
return a.getDB().QueryRowContext(ctx, query, string(reqJSON)).Scan(&success, &errorMsg, &dataJSON)
|
||||
}
|
||||
err = runLoginQuery()
|
||||
if isDBClosed(err) {
|
||||
if reconnErr := a.reconnectDB(); reconnErr == nil {
|
||||
err = runLoginQuery()
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("login query failed: %w", err)
|
||||
}
|
||||
@@ -132,6 +184,40 @@ func (a *DatabaseAuthenticator) Login(ctx context.Context, req LoginRequest) (*L
|
||||
return &response, nil
|
||||
}
|
||||
|
||||
// Register implements Registrable interface
|
||||
func (a *DatabaseAuthenticator) Register(ctx context.Context, req RegisterRequest) (*LoginResponse, error) {
|
||||
// Convert RegisterRequest to JSON
|
||||
reqJSON, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal register request: %w", err)
|
||||
}
|
||||
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var dataJSON sql.NullString
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_data::text FROM %s($1::jsonb)`, a.sqlNames.Register)
|
||||
err = a.getDB().QueryRowContext(ctx, query, string(reqJSON)).Scan(&success, &errorMsg, &dataJSON)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("register query failed: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errorMsg.Valid {
|
||||
return nil, fmt.Errorf("%s", errorMsg.String)
|
||||
}
|
||||
return nil, fmt.Errorf("registration failed")
|
||||
}
|
||||
|
||||
// Parse response
|
||||
var response LoginResponse
|
||||
if err := json.Unmarshal([]byte(dataJSON.String), &response); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse register response: %w", err)
|
||||
}
|
||||
|
||||
return &response, nil
|
||||
}
|
||||
|
||||
func (a *DatabaseAuthenticator) Logout(ctx context.Context, req LogoutRequest) error {
|
||||
// Convert LogoutRequest to JSON
|
||||
reqJSON, err := json.Marshal(req)
|
||||
@@ -139,13 +225,12 @@ func (a *DatabaseAuthenticator) Logout(ctx context.Context, req LogoutRequest) e
|
||||
return fmt.Errorf("failed to marshal logout request: %w", err)
|
||||
}
|
||||
|
||||
// Call resolvespec_logout stored procedure
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var dataJSON sql.NullString
|
||||
|
||||
query := `SELECT p_success, p_error, p_data::text FROM resolvespec_logout($1::jsonb)`
|
||||
err = a.db.QueryRowContext(ctx, query, string(reqJSON)).Scan(&success, &errorMsg, &dataJSON)
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_data::text FROM %s($1::jsonb)`, a.sqlNames.Logout)
|
||||
err = a.getDB().QueryRowContext(ctx, query, string(reqJSON)).Scan(&success, &errorMsg, &dataJSON)
|
||||
if err != nil {
|
||||
return fmt.Errorf("logout query failed: %w", err)
|
||||
}
|
||||
@@ -174,9 +259,8 @@ func (a *DatabaseAuthenticator) Authenticate(r *http.Request) (*UserContext, err
|
||||
|
||||
if sessionToken == "" {
|
||||
// Try cookie
|
||||
cookie, err := r.Cookie("session_token")
|
||||
if err == nil {
|
||||
tokens = []string{cookie.Value}
|
||||
if token := GetSessionCookie(r); token != "" {
|
||||
tokens = []string{token}
|
||||
reference = "cookie"
|
||||
}
|
||||
} else {
|
||||
@@ -219,8 +303,8 @@ func (a *DatabaseAuthenticator) Authenticate(r *http.Request) (*UserContext, err
|
||||
var errorMsg sql.NullString
|
||||
var userJSON sql.NullString
|
||||
|
||||
query := `SELECT p_success, p_error, p_user::text FROM resolvespec_session($1, $2)`
|
||||
err := a.db.QueryRowContext(r.Context(), query, token, reference).Scan(&success, &errorMsg, &userJSON)
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_user::text FROM %s($1, $2)`, a.sqlNames.Session)
|
||||
err := a.getDB().QueryRowContext(r.Context(), query, token, reference).Scan(&success, &errorMsg, &userJSON)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("session query failed: %w", err)
|
||||
}
|
||||
@@ -291,25 +375,23 @@ func (a *DatabaseAuthenticator) updateSessionActivity(ctx context.Context, sessi
|
||||
return
|
||||
}
|
||||
|
||||
// Call resolvespec_session_update stored procedure
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var updatedUserJSON sql.NullString
|
||||
|
||||
query := `SELECT p_success, p_error, p_user::text FROM resolvespec_session_update($1, $2::jsonb)`
|
||||
_ = a.db.QueryRowContext(ctx, query, sessionToken, string(userJSON)).Scan(&success, &errorMsg, &updatedUserJSON)
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_user::text FROM %s($1, $2::jsonb)`, a.sqlNames.SessionUpdate)
|
||||
_ = a.getDB().QueryRowContext(ctx, query, sessionToken, string(userJSON)).Scan(&success, &errorMsg, &updatedUserJSON)
|
||||
}
|
||||
|
||||
// RefreshToken implements Refreshable interface
|
||||
func (a *DatabaseAuthenticator) RefreshToken(ctx context.Context, refreshToken string) (*LoginResponse, error) {
|
||||
// Call api_refresh_token stored procedure
|
||||
// First, we need to get the current user context for the refresh token
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var userJSON sql.NullString
|
||||
// Get current session to pass to refresh
|
||||
query := `SELECT p_success, p_error, p_user::text FROM resolvespec_session($1, $2)`
|
||||
err := a.db.QueryRowContext(ctx, query, refreshToken, "refresh").Scan(&success, &errorMsg, &userJSON)
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_user::text FROM %s($1, $2)`, a.sqlNames.Session)
|
||||
err := a.getDB().QueryRowContext(ctx, query, refreshToken, "refresh").Scan(&success, &errorMsg, &userJSON)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("refresh token query failed: %w", err)
|
||||
}
|
||||
@@ -321,13 +403,12 @@ func (a *DatabaseAuthenticator) RefreshToken(ctx context.Context, refreshToken s
|
||||
return nil, fmt.Errorf("invalid refresh token")
|
||||
}
|
||||
|
||||
// Call resolvespec_refresh_token to generate new token
|
||||
var newSuccess bool
|
||||
var newErrorMsg sql.NullString
|
||||
var newUserJSON sql.NullString
|
||||
|
||||
refreshQuery := `SELECT p_success, p_error, p_user::text FROM resolvespec_refresh_token($1, $2::jsonb)`
|
||||
err = a.db.QueryRowContext(ctx, refreshQuery, refreshToken, userJSON).Scan(&newSuccess, &newErrorMsg, &newUserJSON)
|
||||
refreshQuery := fmt.Sprintf(`SELECT p_success, p_error, p_user::text FROM %s($1, $2::jsonb)`, a.sqlNames.RefreshToken)
|
||||
err = a.getDB().QueryRowContext(ctx, refreshQuery, refreshToken, userJSON).Scan(&newSuccess, &newErrorMsg, &newUserJSON)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("refresh token generation failed: %w", err)
|
||||
}
|
||||
@@ -354,28 +435,65 @@ func (a *DatabaseAuthenticator) RefreshToken(ctx context.Context, refreshToken s
|
||||
|
||||
// JWTAuthenticator provides JWT token-based authentication
|
||||
// All database operations go through stored procedures
|
||||
// Requires stored procedures: resolvespec_jwt_login, resolvespec_jwt_logout
|
||||
// Procedure names are configurable via SQLNames (see DefaultSQLNames for defaults)
|
||||
// NOTE: JWT signing/verification requires github.com/golang-jwt/jwt/v5 to be installed and imported
|
||||
type JWTAuthenticator struct {
|
||||
secretKey []byte
|
||||
db *sql.DB
|
||||
dbMu sync.RWMutex
|
||||
dbFactory func() (*sql.DB, error)
|
||||
sqlNames *SQLNames
|
||||
}
|
||||
|
||||
func NewJWTAuthenticator(secretKey string, db *sql.DB) *JWTAuthenticator {
|
||||
func NewJWTAuthenticator(secretKey string, db *sql.DB, names ...*SQLNames) *JWTAuthenticator {
|
||||
return &JWTAuthenticator{
|
||||
secretKey: []byte(secretKey),
|
||||
db: db,
|
||||
sqlNames: resolveSQLNames(names...),
|
||||
}
|
||||
}
|
||||
|
||||
// WithDBFactory configures a factory used to reopen the database connection if it is closed.
|
||||
func (a *JWTAuthenticator) WithDBFactory(factory func() (*sql.DB, error)) *JWTAuthenticator {
|
||||
a.dbFactory = factory
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *JWTAuthenticator) getDB() *sql.DB {
|
||||
a.dbMu.RLock()
|
||||
defer a.dbMu.RUnlock()
|
||||
return a.db
|
||||
}
|
||||
|
||||
func (a *JWTAuthenticator) reconnectDB() error {
|
||||
if a.dbFactory == nil {
|
||||
return fmt.Errorf("no db factory configured for reconnect")
|
||||
}
|
||||
newDB, err := a.dbFactory()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.dbMu.Lock()
|
||||
a.db = newDB
|
||||
a.dbMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *JWTAuthenticator) Login(ctx context.Context, req LoginRequest) (*LoginResponse, error) {
|
||||
// Call resolvespec_jwt_login stored procedure
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var userJSON []byte
|
||||
|
||||
query := `SELECT p_success, p_error, p_user FROM resolvespec_jwt_login($1, $2)`
|
||||
err := a.db.QueryRowContext(ctx, query, req.Username, req.Password).Scan(&success, &errorMsg, &userJSON)
|
||||
runLoginQuery := func() error {
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_user FROM %s($1, $2)`, a.sqlNames.JWTLogin)
|
||||
return a.getDB().QueryRowContext(ctx, query, req.Username, req.Password).Scan(&success, &errorMsg, &userJSON)
|
||||
}
|
||||
err := runLoginQuery()
|
||||
if isDBClosed(err) {
|
||||
if reconnErr := a.reconnectDB(); reconnErr == nil {
|
||||
err = runLoginQuery()
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("login query failed: %w", err)
|
||||
}
|
||||
@@ -424,12 +542,11 @@ func (a *JWTAuthenticator) Login(ctx context.Context, req LoginRequest) (*LoginR
|
||||
}
|
||||
|
||||
func (a *JWTAuthenticator) Logout(ctx context.Context, req LogoutRequest) error {
|
||||
// Call resolvespec_jwt_logout stored procedure
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
|
||||
query := `SELECT p_success, p_error FROM resolvespec_jwt_logout($1, $2)`
|
||||
err := a.db.QueryRowContext(ctx, query, req.Token, req.UserID).Scan(&success, &errorMsg)
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error FROM %s($1, $2)`, a.sqlNames.JWTLogout)
|
||||
err := a.getDB().QueryRowContext(ctx, query, req.Token, req.UserID).Scan(&success, &errorMsg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("logout query failed: %w", err)
|
||||
}
|
||||
@@ -464,25 +581,60 @@ func (a *JWTAuthenticator) Authenticate(r *http.Request) (*UserContext, error) {
|
||||
|
||||
// DatabaseColumnSecurityProvider loads column security from database
|
||||
// All database operations go through stored procedures
|
||||
// Requires stored procedure: resolvespec_column_security
|
||||
// Procedure names are configurable via SQLNames (see DefaultSQLNames for defaults)
|
||||
type DatabaseColumnSecurityProvider struct {
|
||||
db *sql.DB
|
||||
dbMu sync.RWMutex
|
||||
dbFactory func() (*sql.DB, error)
|
||||
sqlNames *SQLNames
|
||||
}
|
||||
|
||||
func NewDatabaseColumnSecurityProvider(db *sql.DB) *DatabaseColumnSecurityProvider {
|
||||
return &DatabaseColumnSecurityProvider{db: db}
|
||||
func NewDatabaseColumnSecurityProvider(db *sql.DB, names ...*SQLNames) *DatabaseColumnSecurityProvider {
|
||||
return &DatabaseColumnSecurityProvider{db: db, sqlNames: resolveSQLNames(names...)}
|
||||
}
|
||||
|
||||
func (p *DatabaseColumnSecurityProvider) WithDBFactory(factory func() (*sql.DB, error)) *DatabaseColumnSecurityProvider {
|
||||
p.dbFactory = factory
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *DatabaseColumnSecurityProvider) getDB() *sql.DB {
|
||||
p.dbMu.RLock()
|
||||
defer p.dbMu.RUnlock()
|
||||
return p.db
|
||||
}
|
||||
|
||||
func (p *DatabaseColumnSecurityProvider) reconnectDB() error {
|
||||
if p.dbFactory == nil {
|
||||
return fmt.Errorf("no db factory configured for reconnect")
|
||||
}
|
||||
newDB, err := p.dbFactory()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.dbMu.Lock()
|
||||
p.db = newDB
|
||||
p.dbMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *DatabaseColumnSecurityProvider) GetColumnSecurity(ctx context.Context, userID int, schema, table string) ([]ColumnSecurity, error) {
|
||||
var rules []ColumnSecurity
|
||||
|
||||
// Call resolvespec_column_security stored procedure
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var rulesJSON []byte
|
||||
|
||||
query := `SELECT p_success, p_error, p_rules FROM resolvespec_column_security($1, $2, $3)`
|
||||
err := p.db.QueryRowContext(ctx, query, userID, schema, table).Scan(&success, &errorMsg, &rulesJSON)
|
||||
runQuery := func() error {
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_rules FROM %s($1, $2, $3)`, p.sqlNames.ColumnSecurity)
|
||||
return p.getDB().QueryRowContext(ctx, query, userID, schema, table).Scan(&success, &errorMsg, &rulesJSON)
|
||||
}
|
||||
err := runQuery()
|
||||
if isDBClosed(err) {
|
||||
if reconnErr := p.reconnectDB(); reconnErr == nil {
|
||||
err = runQuery()
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load column security: %w", err)
|
||||
}
|
||||
@@ -529,23 +681,57 @@ func (p *DatabaseColumnSecurityProvider) GetColumnSecurity(ctx context.Context,
|
||||
|
||||
// DatabaseRowSecurityProvider loads row security from database
|
||||
// All database operations go through stored procedures
|
||||
// Requires stored procedure: resolvespec_row_security
|
||||
// Procedure names are configurable via SQLNames (see DefaultSQLNames for defaults)
|
||||
type DatabaseRowSecurityProvider struct {
|
||||
db *sql.DB
|
||||
dbMu sync.RWMutex
|
||||
dbFactory func() (*sql.DB, error)
|
||||
sqlNames *SQLNames
|
||||
}
|
||||
|
||||
func NewDatabaseRowSecurityProvider(db *sql.DB) *DatabaseRowSecurityProvider {
|
||||
return &DatabaseRowSecurityProvider{db: db}
|
||||
func NewDatabaseRowSecurityProvider(db *sql.DB, names ...*SQLNames) *DatabaseRowSecurityProvider {
|
||||
return &DatabaseRowSecurityProvider{db: db, sqlNames: resolveSQLNames(names...)}
|
||||
}
|
||||
|
||||
func (p *DatabaseRowSecurityProvider) WithDBFactory(factory func() (*sql.DB, error)) *DatabaseRowSecurityProvider {
|
||||
p.dbFactory = factory
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *DatabaseRowSecurityProvider) getDB() *sql.DB {
|
||||
p.dbMu.RLock()
|
||||
defer p.dbMu.RUnlock()
|
||||
return p.db
|
||||
}
|
||||
|
||||
func (p *DatabaseRowSecurityProvider) reconnectDB() error {
|
||||
if p.dbFactory == nil {
|
||||
return fmt.Errorf("no db factory configured for reconnect")
|
||||
}
|
||||
newDB, err := p.dbFactory()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.dbMu.Lock()
|
||||
p.db = newDB
|
||||
p.dbMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *DatabaseRowSecurityProvider) GetRowSecurity(ctx context.Context, userID int, schema, table string) (RowSecurity, error) {
|
||||
var template string
|
||||
var hasBlock bool
|
||||
|
||||
// Call resolvespec_row_security stored procedure
|
||||
query := `SELECT p_template, p_block FROM resolvespec_row_security($1, $2, $3)`
|
||||
|
||||
err := p.db.QueryRowContext(ctx, query, schema, table, userID).Scan(&template, &hasBlock)
|
||||
runQuery := func() error {
|
||||
query := fmt.Sprintf(`SELECT p_template, p_block FROM %s($1, $2, $3)`, p.sqlNames.RowSecurity)
|
||||
return p.getDB().QueryRowContext(ctx, query, schema, table, userID).Scan(&template, &hasBlock)
|
||||
}
|
||||
err := runQuery()
|
||||
if isDBClosed(err) {
|
||||
if reconnErr := p.reconnectDB(); reconnErr == nil {
|
||||
err = runQuery()
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return RowSecurity{}, fmt.Errorf("failed to load row security: %w", err)
|
||||
}
|
||||
@@ -615,6 +801,11 @@ func (p *ConfigRowSecurityProvider) GetRowSecurity(ctx context.Context, userID i
|
||||
// Helper functions
|
||||
// ================
|
||||
|
||||
// isDBClosed reports whether err indicates the *sql.DB has been closed.
|
||||
func isDBClosed(err error) bool {
|
||||
return err != nil && strings.Contains(err.Error(), "sql: database is closed")
|
||||
}
|
||||
|
||||
func parseRoles(rolesStr string) []string {
|
||||
if rolesStr == "" {
|
||||
return []string{}
|
||||
@@ -654,3 +845,134 @@ func generateRandomString(length int) string {
|
||||
// }
|
||||
// return ""
|
||||
// }
|
||||
|
||||
// Passkey authentication methods
|
||||
// ==============================
|
||||
|
||||
// WithPasskey configures the DatabaseAuthenticator with a passkey provider
|
||||
func (a *DatabaseAuthenticator) WithPasskey(provider PasskeyProvider) *DatabaseAuthenticator {
|
||||
a.passkeyProvider = provider
|
||||
return a
|
||||
}
|
||||
|
||||
// BeginPasskeyRegistration initiates passkey registration for a user
|
||||
func (a *DatabaseAuthenticator) BeginPasskeyRegistration(ctx context.Context, req PasskeyBeginRegistrationRequest) (*PasskeyRegistrationOptions, error) {
|
||||
if a.passkeyProvider == nil {
|
||||
return nil, fmt.Errorf("passkey provider not configured")
|
||||
}
|
||||
return a.passkeyProvider.BeginRegistration(ctx, req.UserID, req.Username, req.DisplayName)
|
||||
}
|
||||
|
||||
// CompletePasskeyRegistration completes passkey registration
|
||||
func (a *DatabaseAuthenticator) CompletePasskeyRegistration(ctx context.Context, req PasskeyRegisterRequest) (*PasskeyCredential, error) {
|
||||
if a.passkeyProvider == nil {
|
||||
return nil, fmt.Errorf("passkey provider not configured")
|
||||
}
|
||||
|
||||
cred, err := a.passkeyProvider.CompleteRegistration(ctx, req.UserID, req.Response, req.ExpectedChallenge)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Update credential name if provided
|
||||
if req.CredentialName != "" && cred.ID != "" {
|
||||
_ = a.passkeyProvider.UpdateCredentialName(ctx, req.UserID, cred.ID, req.CredentialName)
|
||||
}
|
||||
|
||||
return cred, nil
|
||||
}
|
||||
|
||||
// BeginPasskeyAuthentication initiates passkey authentication
|
||||
func (a *DatabaseAuthenticator) BeginPasskeyAuthentication(ctx context.Context, req PasskeyBeginAuthenticationRequest) (*PasskeyAuthenticationOptions, error) {
|
||||
if a.passkeyProvider == nil {
|
||||
return nil, fmt.Errorf("passkey provider not configured")
|
||||
}
|
||||
return a.passkeyProvider.BeginAuthentication(ctx, req.Username)
|
||||
}
|
||||
|
||||
// LoginWithPasskey authenticates a user using a passkey and creates a session
|
||||
func (a *DatabaseAuthenticator) LoginWithPasskey(ctx context.Context, req PasskeyLoginRequest) (*LoginResponse, error) {
|
||||
if a.passkeyProvider == nil {
|
||||
return nil, fmt.Errorf("passkey provider not configured")
|
||||
}
|
||||
|
||||
// Verify passkey assertion
|
||||
userID, err := a.passkeyProvider.CompleteAuthentication(ctx, req.Response, req.ExpectedChallenge)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("passkey authentication failed: %w", err)
|
||||
}
|
||||
|
||||
// Build request JSON for passkey login stored procedure
|
||||
reqData := map[string]any{
|
||||
"user_id": userID,
|
||||
}
|
||||
if req.Claims != nil {
|
||||
if ip, ok := req.Claims["ip_address"].(string); ok {
|
||||
reqData["ip_address"] = ip
|
||||
}
|
||||
if ua, ok := req.Claims["user_agent"].(string); ok {
|
||||
reqData["user_agent"] = ua
|
||||
}
|
||||
}
|
||||
|
||||
reqJSON, err := json.Marshal(reqData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal passkey login request: %w", err)
|
||||
}
|
||||
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var dataJSON sql.NullString
|
||||
|
||||
runPasskeyQuery := func() error {
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_data::text FROM %s($1::jsonb)`, a.sqlNames.PasskeyLogin)
|
||||
return a.getDB().QueryRowContext(ctx, query, string(reqJSON)).Scan(&success, &errorMsg, &dataJSON)
|
||||
}
|
||||
err = runPasskeyQuery()
|
||||
if isDBClosed(err) {
|
||||
if reconnErr := a.reconnectDB(); reconnErr == nil {
|
||||
err = runPasskeyQuery()
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("passkey login query failed: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errorMsg.Valid {
|
||||
return nil, fmt.Errorf("%s", errorMsg.String)
|
||||
}
|
||||
return nil, fmt.Errorf("passkey login failed")
|
||||
}
|
||||
|
||||
var response LoginResponse
|
||||
if err := json.Unmarshal([]byte(dataJSON.String), &response); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse passkey login response: %w", err)
|
||||
}
|
||||
|
||||
return &response, nil
|
||||
}
|
||||
|
||||
// GetPasskeyCredentials returns all passkey credentials for a user
|
||||
func (a *DatabaseAuthenticator) GetPasskeyCredentials(ctx context.Context, userID int) ([]PasskeyCredential, error) {
|
||||
if a.passkeyProvider == nil {
|
||||
return nil, fmt.Errorf("passkey provider not configured")
|
||||
}
|
||||
return a.passkeyProvider.GetCredentials(ctx, userID)
|
||||
}
|
||||
|
||||
// DeletePasskeyCredential removes a passkey credential
|
||||
func (a *DatabaseAuthenticator) DeletePasskeyCredential(ctx context.Context, userID int, credentialID string) error {
|
||||
if a.passkeyProvider == nil {
|
||||
return fmt.Errorf("passkey provider not configured")
|
||||
}
|
||||
return a.passkeyProvider.DeleteCredential(ctx, userID, credentialID)
|
||||
}
|
||||
|
||||
// UpdatePasskeyCredentialName updates the friendly name of a credential
|
||||
func (a *DatabaseAuthenticator) UpdatePasskeyCredentialName(ctx context.Context, userID int, credentialID string, name string) error {
|
||||
if a.passkeyProvider == nil {
|
||||
return fmt.Errorf("passkey provider not configured")
|
||||
}
|
||||
return a.passkeyProvider.UpdateCredentialName(ctx, userID, credentialID, name)
|
||||
}
|
||||
|
||||
@@ -635,6 +635,94 @@ func TestDatabaseAuthenticator(t *testing.T) {
|
||||
t.Errorf("unfulfilled expectations: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("successful registration", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
req := RegisterRequest{
|
||||
Username: "newuser",
|
||||
Password: "password123",
|
||||
Email: "newuser@example.com",
|
||||
UserLevel: 1,
|
||||
Roles: []string{"user"},
|
||||
}
|
||||
|
||||
rows := sqlmock.NewRows([]string{"p_success", "p_error", "p_data"}).
|
||||
AddRow(true, nil, `{"token":"abc123","user":{"user_id":1,"user_name":"newuser","email":"newuser@example.com"},"expires_in":86400}`)
|
||||
|
||||
mock.ExpectQuery(`SELECT p_success, p_error, p_data::text FROM resolvespec_register`).
|
||||
WithArgs(sqlmock.AnyArg()).
|
||||
WillReturnRows(rows)
|
||||
|
||||
resp, err := auth.Register(ctx, req)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
}
|
||||
|
||||
if resp.Token != "abc123" {
|
||||
t.Errorf("expected token abc123, got %s", resp.Token)
|
||||
}
|
||||
if resp.User.UserName != "newuser" {
|
||||
t.Errorf("expected username newuser, got %s", resp.User.UserName)
|
||||
}
|
||||
|
||||
if err := mock.ExpectationsWereMet(); err != nil {
|
||||
t.Errorf("unfulfilled expectations: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("registration with duplicate username", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
req := RegisterRequest{
|
||||
Username: "existinguser",
|
||||
Password: "password123",
|
||||
Email: "new@example.com",
|
||||
UserLevel: 1,
|
||||
Roles: []string{"user"},
|
||||
}
|
||||
|
||||
rows := sqlmock.NewRows([]string{"p_success", "p_error", "p_data"}).
|
||||
AddRow(false, "Username already exists", nil)
|
||||
|
||||
mock.ExpectQuery(`SELECT p_success, p_error, p_data::text FROM resolvespec_register`).
|
||||
WithArgs(sqlmock.AnyArg()).
|
||||
WillReturnRows(rows)
|
||||
|
||||
_, err := auth.Register(ctx, req)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for duplicate username")
|
||||
}
|
||||
|
||||
if err := mock.ExpectationsWereMet(); err != nil {
|
||||
t.Errorf("unfulfilled expectations: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("registration with duplicate email", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
req := RegisterRequest{
|
||||
Username: "newuser2",
|
||||
Password: "password123",
|
||||
Email: "existing@example.com",
|
||||
UserLevel: 1,
|
||||
Roles: []string{"user"},
|
||||
}
|
||||
|
||||
rows := sqlmock.NewRows([]string{"p_success", "p_error", "p_data"}).
|
||||
AddRow(false, "Email already exists", nil)
|
||||
|
||||
mock.ExpectQuery(`SELECT p_success, p_error, p_data::text FROM resolvespec_register`).
|
||||
WithArgs(sqlmock.AnyArg()).
|
||||
WillReturnRows(rows)
|
||||
|
||||
_, err := auth.Register(ctx, req)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for duplicate email")
|
||||
}
|
||||
|
||||
if err := mock.ExpectationsWereMet(); err != nil {
|
||||
t.Errorf("unfulfilled expectations: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Test DatabaseAuthenticator RefreshToken
|
||||
|
||||
222
pkg/security/sql_names.go
Normal file
222
pkg/security/sql_names.go
Normal file
@@ -0,0 +1,222 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var validSQLIdentifier = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`)
|
||||
|
||||
// SQLNames defines all configurable SQL stored procedure and table names
|
||||
// used by the security package. Override individual fields to remap
|
||||
// to custom database objects. Use DefaultSQLNames() for baseline defaults,
|
||||
// and MergeSQLNames() to apply partial overrides.
|
||||
type SQLNames struct {
|
||||
// Auth procedures (DatabaseAuthenticator)
|
||||
Login string // default: "resolvespec_login"
|
||||
Register string // default: "resolvespec_register"
|
||||
Logout string // default: "resolvespec_logout"
|
||||
Session string // default: "resolvespec_session"
|
||||
SessionUpdate string // default: "resolvespec_session_update"
|
||||
RefreshToken string // default: "resolvespec_refresh_token"
|
||||
|
||||
// JWT procedures (JWTAuthenticator)
|
||||
JWTLogin string // default: "resolvespec_jwt_login"
|
||||
JWTLogout string // default: "resolvespec_jwt_logout"
|
||||
|
||||
// Security policy procedures
|
||||
ColumnSecurity string // default: "resolvespec_column_security"
|
||||
RowSecurity string // default: "resolvespec_row_security"
|
||||
|
||||
// TOTP procedures (DatabaseTwoFactorProvider)
|
||||
TOTPEnable string // default: "resolvespec_totp_enable"
|
||||
TOTPDisable string // default: "resolvespec_totp_disable"
|
||||
TOTPGetStatus string // default: "resolvespec_totp_get_status"
|
||||
TOTPGetSecret string // default: "resolvespec_totp_get_secret"
|
||||
TOTPRegenerateBackup string // default: "resolvespec_totp_regenerate_backup_codes"
|
||||
TOTPValidateBackupCode string // default: "resolvespec_totp_validate_backup_code"
|
||||
|
||||
// Passkey procedures (DatabasePasskeyProvider)
|
||||
PasskeyStoreCredential string // default: "resolvespec_passkey_store_credential"
|
||||
PasskeyGetCredsByUsername string // default: "resolvespec_passkey_get_credentials_by_username"
|
||||
PasskeyGetCredential string // default: "resolvespec_passkey_get_credential"
|
||||
PasskeyUpdateCounter string // default: "resolvespec_passkey_update_counter"
|
||||
PasskeyGetUserCredentials string // default: "resolvespec_passkey_get_user_credentials"
|
||||
PasskeyDeleteCredential string // default: "resolvespec_passkey_delete_credential"
|
||||
PasskeyUpdateName string // default: "resolvespec_passkey_update_name"
|
||||
PasskeyLogin string // default: "resolvespec_passkey_login"
|
||||
|
||||
// OAuth2 procedures (DatabaseAuthenticator OAuth2 methods)
|
||||
OAuthGetOrCreateUser string // default: "resolvespec_oauth_getorcreateuser"
|
||||
OAuthCreateSession string // default: "resolvespec_oauth_createsession"
|
||||
OAuthGetRefreshToken string // default: "resolvespec_oauth_getrefreshtoken"
|
||||
OAuthUpdateRefreshToken string // default: "resolvespec_oauth_updaterefreshtoken"
|
||||
OAuthGetUser string // default: "resolvespec_oauth_getuser"
|
||||
|
||||
}
|
||||
|
||||
// DefaultSQLNames returns an SQLNames with all default resolvespec_* values.
|
||||
func DefaultSQLNames() *SQLNames {
|
||||
return &SQLNames{
|
||||
Login: "resolvespec_login",
|
||||
Register: "resolvespec_register",
|
||||
Logout: "resolvespec_logout",
|
||||
Session: "resolvespec_session",
|
||||
SessionUpdate: "resolvespec_session_update",
|
||||
RefreshToken: "resolvespec_refresh_token",
|
||||
|
||||
JWTLogin: "resolvespec_jwt_login",
|
||||
JWTLogout: "resolvespec_jwt_logout",
|
||||
|
||||
ColumnSecurity: "resolvespec_column_security",
|
||||
RowSecurity: "resolvespec_row_security",
|
||||
|
||||
TOTPEnable: "resolvespec_totp_enable",
|
||||
TOTPDisable: "resolvespec_totp_disable",
|
||||
TOTPGetStatus: "resolvespec_totp_get_status",
|
||||
TOTPGetSecret: "resolvespec_totp_get_secret",
|
||||
TOTPRegenerateBackup: "resolvespec_totp_regenerate_backup_codes",
|
||||
TOTPValidateBackupCode: "resolvespec_totp_validate_backup_code",
|
||||
|
||||
PasskeyStoreCredential: "resolvespec_passkey_store_credential",
|
||||
PasskeyGetCredsByUsername: "resolvespec_passkey_get_credentials_by_username",
|
||||
PasskeyGetCredential: "resolvespec_passkey_get_credential",
|
||||
PasskeyUpdateCounter: "resolvespec_passkey_update_counter",
|
||||
PasskeyGetUserCredentials: "resolvespec_passkey_get_user_credentials",
|
||||
PasskeyDeleteCredential: "resolvespec_passkey_delete_credential",
|
||||
PasskeyUpdateName: "resolvespec_passkey_update_name",
|
||||
PasskeyLogin: "resolvespec_passkey_login",
|
||||
|
||||
OAuthGetOrCreateUser: "resolvespec_oauth_getorcreateuser",
|
||||
OAuthCreateSession: "resolvespec_oauth_createsession",
|
||||
OAuthGetRefreshToken: "resolvespec_oauth_getrefreshtoken",
|
||||
OAuthUpdateRefreshToken: "resolvespec_oauth_updaterefreshtoken",
|
||||
OAuthGetUser: "resolvespec_oauth_getuser",
|
||||
}
|
||||
}
|
||||
|
||||
// MergeSQLNames returns a copy of base with any non-empty fields from override applied.
|
||||
// If override is nil, a copy of base is returned.
|
||||
func MergeSQLNames(base, override *SQLNames) *SQLNames {
|
||||
if override == nil {
|
||||
copied := *base
|
||||
return &copied
|
||||
}
|
||||
merged := *base
|
||||
if override.Login != "" {
|
||||
merged.Login = override.Login
|
||||
}
|
||||
if override.Register != "" {
|
||||
merged.Register = override.Register
|
||||
}
|
||||
if override.Logout != "" {
|
||||
merged.Logout = override.Logout
|
||||
}
|
||||
if override.Session != "" {
|
||||
merged.Session = override.Session
|
||||
}
|
||||
if override.SessionUpdate != "" {
|
||||
merged.SessionUpdate = override.SessionUpdate
|
||||
}
|
||||
if override.RefreshToken != "" {
|
||||
merged.RefreshToken = override.RefreshToken
|
||||
}
|
||||
if override.JWTLogin != "" {
|
||||
merged.JWTLogin = override.JWTLogin
|
||||
}
|
||||
if override.JWTLogout != "" {
|
||||
merged.JWTLogout = override.JWTLogout
|
||||
}
|
||||
if override.ColumnSecurity != "" {
|
||||
merged.ColumnSecurity = override.ColumnSecurity
|
||||
}
|
||||
if override.RowSecurity != "" {
|
||||
merged.RowSecurity = override.RowSecurity
|
||||
}
|
||||
if override.TOTPEnable != "" {
|
||||
merged.TOTPEnable = override.TOTPEnable
|
||||
}
|
||||
if override.TOTPDisable != "" {
|
||||
merged.TOTPDisable = override.TOTPDisable
|
||||
}
|
||||
if override.TOTPGetStatus != "" {
|
||||
merged.TOTPGetStatus = override.TOTPGetStatus
|
||||
}
|
||||
if override.TOTPGetSecret != "" {
|
||||
merged.TOTPGetSecret = override.TOTPGetSecret
|
||||
}
|
||||
if override.TOTPRegenerateBackup != "" {
|
||||
merged.TOTPRegenerateBackup = override.TOTPRegenerateBackup
|
||||
}
|
||||
if override.TOTPValidateBackupCode != "" {
|
||||
merged.TOTPValidateBackupCode = override.TOTPValidateBackupCode
|
||||
}
|
||||
if override.PasskeyStoreCredential != "" {
|
||||
merged.PasskeyStoreCredential = override.PasskeyStoreCredential
|
||||
}
|
||||
if override.PasskeyGetCredsByUsername != "" {
|
||||
merged.PasskeyGetCredsByUsername = override.PasskeyGetCredsByUsername
|
||||
}
|
||||
if override.PasskeyGetCredential != "" {
|
||||
merged.PasskeyGetCredential = override.PasskeyGetCredential
|
||||
}
|
||||
if override.PasskeyUpdateCounter != "" {
|
||||
merged.PasskeyUpdateCounter = override.PasskeyUpdateCounter
|
||||
}
|
||||
if override.PasskeyGetUserCredentials != "" {
|
||||
merged.PasskeyGetUserCredentials = override.PasskeyGetUserCredentials
|
||||
}
|
||||
if override.PasskeyDeleteCredential != "" {
|
||||
merged.PasskeyDeleteCredential = override.PasskeyDeleteCredential
|
||||
}
|
||||
if override.PasskeyUpdateName != "" {
|
||||
merged.PasskeyUpdateName = override.PasskeyUpdateName
|
||||
}
|
||||
if override.PasskeyLogin != "" {
|
||||
merged.PasskeyLogin = override.PasskeyLogin
|
||||
}
|
||||
if override.OAuthGetOrCreateUser != "" {
|
||||
merged.OAuthGetOrCreateUser = override.OAuthGetOrCreateUser
|
||||
}
|
||||
if override.OAuthCreateSession != "" {
|
||||
merged.OAuthCreateSession = override.OAuthCreateSession
|
||||
}
|
||||
if override.OAuthGetRefreshToken != "" {
|
||||
merged.OAuthGetRefreshToken = override.OAuthGetRefreshToken
|
||||
}
|
||||
if override.OAuthUpdateRefreshToken != "" {
|
||||
merged.OAuthUpdateRefreshToken = override.OAuthUpdateRefreshToken
|
||||
}
|
||||
if override.OAuthGetUser != "" {
|
||||
merged.OAuthGetUser = override.OAuthGetUser
|
||||
}
|
||||
return &merged
|
||||
}
|
||||
|
||||
// ValidateSQLNames checks that all non-empty fields in names are valid SQL identifiers.
|
||||
// Returns an error if any field contains invalid characters.
|
||||
func ValidateSQLNames(names *SQLNames) error {
|
||||
v := reflect.ValueOf(names).Elem()
|
||||
typ := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
if field.Kind() != reflect.String {
|
||||
continue
|
||||
}
|
||||
val := field.String()
|
||||
if val != "" && !validSQLIdentifier.MatchString(val) {
|
||||
return fmt.Errorf("SQLNames.%s contains invalid characters: %q", typ.Field(i).Name, val)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// resolveSQLNames merges an optional override with defaults.
|
||||
// Used by constructors that accept variadic *SQLNames.
|
||||
func resolveSQLNames(override ...*SQLNames) *SQLNames {
|
||||
if len(override) > 0 && override[0] != nil {
|
||||
return MergeSQLNames(DefaultSQLNames(), override[0])
|
||||
}
|
||||
return DefaultSQLNames()
|
||||
}
|
||||
145
pkg/security/sql_names_test.go
Normal file
145
pkg/security/sql_names_test.go
Normal file
@@ -0,0 +1,145 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDefaultSQLNames_AllFieldsNonEmpty(t *testing.T) {
|
||||
names := DefaultSQLNames()
|
||||
v := reflect.ValueOf(names).Elem()
|
||||
typ := v.Type()
|
||||
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
if field.Kind() != reflect.String {
|
||||
continue
|
||||
}
|
||||
if field.String() == "" {
|
||||
t.Errorf("DefaultSQLNames().%s is empty", typ.Field(i).Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMergeSQLNames_PartialOverride(t *testing.T) {
|
||||
base := DefaultSQLNames()
|
||||
override := &SQLNames{
|
||||
Login: "custom_login",
|
||||
TOTPEnable: "custom_totp_enable",
|
||||
PasskeyLogin: "custom_passkey_login",
|
||||
}
|
||||
|
||||
merged := MergeSQLNames(base, override)
|
||||
|
||||
if merged.Login != "custom_login" {
|
||||
t.Errorf("MergeSQLNames().Login = %q, want %q", merged.Login, "custom_login")
|
||||
}
|
||||
if merged.TOTPEnable != "custom_totp_enable" {
|
||||
t.Errorf("MergeSQLNames().TOTPEnable = %q, want %q", merged.TOTPEnable, "custom_totp_enable")
|
||||
}
|
||||
if merged.PasskeyLogin != "custom_passkey_login" {
|
||||
t.Errorf("MergeSQLNames().PasskeyLogin = %q, want %q", merged.PasskeyLogin, "custom_passkey_login")
|
||||
}
|
||||
// Non-overridden fields should retain defaults
|
||||
if merged.Logout != "resolvespec_logout" {
|
||||
t.Errorf("MergeSQLNames().Logout = %q, want %q", merged.Logout, "resolvespec_logout")
|
||||
}
|
||||
if merged.Session != "resolvespec_session" {
|
||||
t.Errorf("MergeSQLNames().Session = %q, want %q", merged.Session, "resolvespec_session")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMergeSQLNames_NilOverride(t *testing.T) {
|
||||
base := DefaultSQLNames()
|
||||
merged := MergeSQLNames(base, nil)
|
||||
|
||||
// Should be a copy, not the same pointer
|
||||
if merged == base {
|
||||
t.Error("MergeSQLNames with nil override should return a copy, not the same pointer")
|
||||
}
|
||||
|
||||
// All values should match
|
||||
v1 := reflect.ValueOf(base).Elem()
|
||||
v2 := reflect.ValueOf(merged).Elem()
|
||||
typ := v1.Type()
|
||||
|
||||
for i := 0; i < v1.NumField(); i++ {
|
||||
f1 := v1.Field(i)
|
||||
f2 := v2.Field(i)
|
||||
if f1.Kind() != reflect.String {
|
||||
continue
|
||||
}
|
||||
if f1.String() != f2.String() {
|
||||
t.Errorf("MergeSQLNames(base, nil).%s = %q, want %q", typ.Field(i).Name, f2.String(), f1.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMergeSQLNames_DoesNotMutateBase(t *testing.T) {
|
||||
base := DefaultSQLNames()
|
||||
originalLogin := base.Login
|
||||
|
||||
override := &SQLNames{Login: "custom_login"}
|
||||
_ = MergeSQLNames(base, override)
|
||||
|
||||
if base.Login != originalLogin {
|
||||
t.Errorf("MergeSQLNames mutated base: Login = %q, want %q", base.Login, originalLogin)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMergeSQLNames_AllFieldsMerged(t *testing.T) {
|
||||
base := DefaultSQLNames()
|
||||
override := &SQLNames{}
|
||||
v := reflect.ValueOf(override).Elem()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
if v.Field(i).Kind() == reflect.String {
|
||||
v.Field(i).SetString("custom_sentinel")
|
||||
}
|
||||
}
|
||||
|
||||
merged := MergeSQLNames(base, override)
|
||||
mv := reflect.ValueOf(merged).Elem()
|
||||
typ := mv.Type()
|
||||
for i := 0; i < mv.NumField(); i++ {
|
||||
if mv.Field(i).Kind() != reflect.String {
|
||||
continue
|
||||
}
|
||||
if mv.Field(i).String() != "custom_sentinel" {
|
||||
t.Errorf("MergeSQLNames did not merge field %s", typ.Field(i).Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSQLNames_Valid(t *testing.T) {
|
||||
names := DefaultSQLNames()
|
||||
if err := ValidateSQLNames(names); err != nil {
|
||||
t.Errorf("ValidateSQLNames(defaults) error = %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSQLNames_Invalid(t *testing.T) {
|
||||
names := DefaultSQLNames()
|
||||
names.Login = "resolvespec_login; DROP TABLE users; --"
|
||||
|
||||
err := ValidateSQLNames(names)
|
||||
if err == nil {
|
||||
t.Error("ValidateSQLNames should reject names with invalid characters")
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveSQLNames_NoOverride(t *testing.T) {
|
||||
names := resolveSQLNames()
|
||||
if names.Login != "resolvespec_login" {
|
||||
t.Errorf("resolveSQLNames().Login = %q, want default", names.Login)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveSQLNames_WithOverride(t *testing.T) {
|
||||
names := resolveSQLNames(&SQLNames{Login: "custom_login"})
|
||||
if names.Login != "custom_login" {
|
||||
t.Errorf("resolveSQLNames().Login = %q, want %q", names.Login, "custom_login")
|
||||
}
|
||||
if names.Logout != "resolvespec_logout" {
|
||||
t.Errorf("resolveSQLNames().Logout = %q, want default", names.Logout)
|
||||
}
|
||||
}
|
||||
188
pkg/security/totp.go
Normal file
188
pkg/security/totp.go
Normal file
@@ -0,0 +1,188 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"encoding/base32"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash"
|
||||
"math"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TwoFactorAuthProvider defines interface for 2FA operations
|
||||
type TwoFactorAuthProvider interface {
|
||||
// Generate2FASecret creates a new secret for a user
|
||||
Generate2FASecret(userID int, issuer, accountName string) (*TwoFactorSecret, error)
|
||||
|
||||
// Validate2FACode verifies a TOTP code
|
||||
Validate2FACode(secret string, code string) (bool, error)
|
||||
|
||||
// Enable2FA activates 2FA for a user (store secret in your database)
|
||||
Enable2FA(userID int, secret string, backupCodes []string) error
|
||||
|
||||
// Disable2FA deactivates 2FA for a user
|
||||
Disable2FA(userID int) error
|
||||
|
||||
// Get2FAStatus checks if user has 2FA enabled
|
||||
Get2FAStatus(userID int) (bool, error)
|
||||
|
||||
// Get2FASecret retrieves the user's 2FA secret
|
||||
Get2FASecret(userID int) (string, error)
|
||||
|
||||
// GenerateBackupCodes creates backup codes for 2FA
|
||||
GenerateBackupCodes(userID int, count int) ([]string, error)
|
||||
|
||||
// ValidateBackupCode checks and consumes a backup code
|
||||
ValidateBackupCode(userID int, code string) (bool, error)
|
||||
}
|
||||
|
||||
// TwoFactorSecret contains 2FA setup information
|
||||
type TwoFactorSecret struct {
|
||||
Secret string `json:"secret"` // Base32 encoded secret
|
||||
QRCodeURL string `json:"qr_code_url"` // URL for QR code generation
|
||||
BackupCodes []string `json:"backup_codes"` // One-time backup codes
|
||||
Issuer string `json:"issuer"` // Application name
|
||||
AccountName string `json:"account_name"` // User identifier (email/username)
|
||||
}
|
||||
|
||||
// TwoFactorConfig holds TOTP configuration
|
||||
type TwoFactorConfig struct {
|
||||
Algorithm string // SHA1, SHA256, SHA512
|
||||
Digits int // Number of digits in code (6 or 8)
|
||||
Period int // Time step in seconds (default 30)
|
||||
SkewWindow int // Number of time steps to check before/after (default 1)
|
||||
}
|
||||
|
||||
// DefaultTwoFactorConfig returns standard TOTP configuration
|
||||
func DefaultTwoFactorConfig() *TwoFactorConfig {
|
||||
return &TwoFactorConfig{
|
||||
Algorithm: "SHA1",
|
||||
Digits: 6,
|
||||
Period: 30,
|
||||
SkewWindow: 1,
|
||||
}
|
||||
}
|
||||
|
||||
// TOTPGenerator handles TOTP code generation and validation
|
||||
type TOTPGenerator struct {
|
||||
config *TwoFactorConfig
|
||||
}
|
||||
|
||||
// NewTOTPGenerator creates a new TOTP generator with config
|
||||
func NewTOTPGenerator(config *TwoFactorConfig) *TOTPGenerator {
|
||||
if config == nil {
|
||||
config = DefaultTwoFactorConfig()
|
||||
}
|
||||
return &TOTPGenerator{
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
// GenerateSecret creates a random base32-encoded secret
|
||||
func (t *TOTPGenerator) GenerateSecret() (string, error) {
|
||||
secret := make([]byte, 20)
|
||||
_, err := rand.Read(secret)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate random secret: %w", err)
|
||||
}
|
||||
return base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(secret), nil
|
||||
}
|
||||
|
||||
// GenerateQRCodeURL creates a URL for QR code generation
|
||||
func (t *TOTPGenerator) GenerateQRCodeURL(secret, issuer, accountName string) string {
|
||||
params := url.Values{}
|
||||
params.Set("secret", secret)
|
||||
params.Set("issuer", issuer)
|
||||
params.Set("algorithm", t.config.Algorithm)
|
||||
params.Set("digits", fmt.Sprintf("%d", t.config.Digits))
|
||||
params.Set("period", fmt.Sprintf("%d", t.config.Period))
|
||||
|
||||
label := url.PathEscape(fmt.Sprintf("%s:%s", issuer, accountName))
|
||||
return fmt.Sprintf("otpauth://totp/%s?%s", label, params.Encode())
|
||||
}
|
||||
|
||||
// GenerateCode creates a TOTP code for a given time
|
||||
func (t *TOTPGenerator) GenerateCode(secret string, timestamp time.Time) (string, error) {
|
||||
// Decode secret
|
||||
key, err := base32.StdEncoding.WithPadding(base32.NoPadding).DecodeString(strings.ToUpper(secret))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid secret: %w", err)
|
||||
}
|
||||
|
||||
// Calculate counter (time steps since Unix epoch)
|
||||
counter := uint64(timestamp.Unix()) / uint64(t.config.Period)
|
||||
|
||||
// Generate HMAC
|
||||
h := t.getHashFunc()
|
||||
mac := hmac.New(h, key)
|
||||
|
||||
// Convert counter to 8-byte array
|
||||
buf := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(buf, counter)
|
||||
mac.Write(buf)
|
||||
|
||||
sum := mac.Sum(nil)
|
||||
|
||||
// Dynamic truncation
|
||||
offset := sum[len(sum)-1] & 0x0f
|
||||
truncated := binary.BigEndian.Uint32(sum[offset:]) & 0x7fffffff
|
||||
|
||||
// Generate code with specified digits
|
||||
code := truncated % uint32(math.Pow10(t.config.Digits))
|
||||
|
||||
format := fmt.Sprintf("%%0%dd", t.config.Digits)
|
||||
return fmt.Sprintf(format, code), nil
|
||||
}
|
||||
|
||||
// ValidateCode checks if a code is valid for the secret
|
||||
func (t *TOTPGenerator) ValidateCode(secret, code string) (bool, error) {
|
||||
now := time.Now()
|
||||
|
||||
// Check current time and skew window
|
||||
for i := -t.config.SkewWindow; i <= t.config.SkewWindow; i++ {
|
||||
timestamp := now.Add(time.Duration(i*t.config.Period) * time.Second)
|
||||
expected, err := t.GenerateCode(secret, timestamp)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if code == expected {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// getHashFunc returns the hash function based on algorithm
|
||||
func (t *TOTPGenerator) getHashFunc() func() hash.Hash {
|
||||
switch strings.ToUpper(t.config.Algorithm) {
|
||||
case "SHA256":
|
||||
return sha256.New
|
||||
case "SHA512":
|
||||
return sha512.New
|
||||
default:
|
||||
return sha1.New
|
||||
}
|
||||
}
|
||||
|
||||
// GenerateBackupCodes creates random backup codes
|
||||
func GenerateBackupCodes(count int) ([]string, error) {
|
||||
codes := make([]string, count)
|
||||
for i := 0; i < count; i++ {
|
||||
code := make([]byte, 4)
|
||||
_, err := rand.Read(code)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate backup code: %w", err)
|
||||
}
|
||||
codes[i] = fmt.Sprintf("%08X", binary.BigEndian.Uint32(code))
|
||||
}
|
||||
return codes, nil
|
||||
}
|
||||
399
pkg/security/totp_integration_test.go
Normal file
399
pkg/security/totp_integration_test.go
Normal file
@@ -0,0 +1,399 @@
|
||||
package security_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/security"
|
||||
)
|
||||
|
||||
var ErrInvalidCredentials = errors.New("invalid credentials")
|
||||
|
||||
// MockAuthenticator is a simple authenticator for testing 2FA
|
||||
type MockAuthenticator struct {
|
||||
users map[string]*security.UserContext
|
||||
}
|
||||
|
||||
func NewMockAuthenticator() *MockAuthenticator {
|
||||
return &MockAuthenticator{
|
||||
users: map[string]*security.UserContext{
|
||||
"testuser": {
|
||||
UserID: 1,
|
||||
UserName: "testuser",
|
||||
Email: "test@example.com",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MockAuthenticator) Login(ctx context.Context, req security.LoginRequest) (*security.LoginResponse, error) {
|
||||
user, exists := m.users[req.Username]
|
||||
if !exists || req.Password != "password" {
|
||||
return nil, ErrInvalidCredentials
|
||||
}
|
||||
|
||||
return &security.LoginResponse{
|
||||
Token: "mock-token",
|
||||
RefreshToken: "mock-refresh-token",
|
||||
User: user,
|
||||
ExpiresIn: 3600,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *MockAuthenticator) Logout(ctx context.Context, req security.LogoutRequest) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockAuthenticator) Authenticate(r *http.Request) (*security.UserContext, error) {
|
||||
return m.users["testuser"], nil
|
||||
}
|
||||
|
||||
func TestTwoFactorAuthenticator_Setup(t *testing.T) {
|
||||
baseAuth := NewMockAuthenticator()
|
||||
provider := security.NewMemoryTwoFactorProvider(nil)
|
||||
tfaAuth := security.NewTwoFactorAuthenticator(baseAuth, provider, nil)
|
||||
|
||||
// Setup 2FA
|
||||
secret, err := tfaAuth.Setup2FA(1, "TestApp", "test@example.com")
|
||||
if err != nil {
|
||||
t.Fatalf("Setup2FA() error = %v", err)
|
||||
}
|
||||
|
||||
if secret.Secret == "" {
|
||||
t.Error("Setup2FA() returned empty secret")
|
||||
}
|
||||
|
||||
if secret.QRCodeURL == "" {
|
||||
t.Error("Setup2FA() returned empty QR code URL")
|
||||
}
|
||||
|
||||
if len(secret.BackupCodes) == 0 {
|
||||
t.Error("Setup2FA() returned no backup codes")
|
||||
}
|
||||
|
||||
if secret.Issuer != "TestApp" {
|
||||
t.Errorf("Setup2FA() Issuer = %s, want TestApp", secret.Issuer)
|
||||
}
|
||||
|
||||
if secret.AccountName != "test@example.com" {
|
||||
t.Errorf("Setup2FA() AccountName = %s, want test@example.com", secret.AccountName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTwoFactorAuthenticator_Enable2FA(t *testing.T) {
|
||||
baseAuth := NewMockAuthenticator()
|
||||
provider := security.NewMemoryTwoFactorProvider(nil)
|
||||
tfaAuth := security.NewTwoFactorAuthenticator(baseAuth, provider, nil)
|
||||
|
||||
// Setup 2FA
|
||||
secret, err := tfaAuth.Setup2FA(1, "TestApp", "test@example.com")
|
||||
if err != nil {
|
||||
t.Fatalf("Setup2FA() error = %v", err)
|
||||
}
|
||||
|
||||
// Generate valid code
|
||||
totp := security.NewTOTPGenerator(nil)
|
||||
code, err := totp.GenerateCode(secret.Secret, time.Now())
|
||||
if err != nil {
|
||||
t.Fatalf("GenerateCode() error = %v", err)
|
||||
}
|
||||
|
||||
// Enable 2FA with valid code
|
||||
err = tfaAuth.Enable2FA(1, secret.Secret, code)
|
||||
if err != nil {
|
||||
t.Errorf("Enable2FA() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify 2FA is enabled
|
||||
status, err := provider.Get2FAStatus(1)
|
||||
if err != nil {
|
||||
t.Fatalf("Get2FAStatus() error = %v", err)
|
||||
}
|
||||
|
||||
if !status {
|
||||
t.Error("Enable2FA() did not enable 2FA")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTwoFactorAuthenticator_Enable2FA_InvalidCode(t *testing.T) {
|
||||
baseAuth := NewMockAuthenticator()
|
||||
provider := security.NewMemoryTwoFactorProvider(nil)
|
||||
tfaAuth := security.NewTwoFactorAuthenticator(baseAuth, provider, nil)
|
||||
|
||||
// Setup 2FA
|
||||
secret, err := tfaAuth.Setup2FA(1, "TestApp", "test@example.com")
|
||||
if err != nil {
|
||||
t.Fatalf("Setup2FA() error = %v", err)
|
||||
}
|
||||
|
||||
// Try to enable with invalid code
|
||||
err = tfaAuth.Enable2FA(1, secret.Secret, "000000")
|
||||
if err == nil {
|
||||
t.Error("Enable2FA() should fail with invalid code")
|
||||
}
|
||||
|
||||
// Verify 2FA is not enabled
|
||||
status, _ := provider.Get2FAStatus(1)
|
||||
if status {
|
||||
t.Error("Enable2FA() should not enable 2FA with invalid code")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTwoFactorAuthenticator_Login_Without2FA(t *testing.T) {
|
||||
baseAuth := NewMockAuthenticator()
|
||||
provider := security.NewMemoryTwoFactorProvider(nil)
|
||||
tfaAuth := security.NewTwoFactorAuthenticator(baseAuth, provider, nil)
|
||||
|
||||
req := security.LoginRequest{
|
||||
Username: "testuser",
|
||||
Password: "password",
|
||||
}
|
||||
|
||||
resp, err := tfaAuth.Login(context.Background(), req)
|
||||
if err != nil {
|
||||
t.Fatalf("Login() error = %v", err)
|
||||
}
|
||||
|
||||
if resp.Requires2FA {
|
||||
t.Error("Login() should not require 2FA when not enabled")
|
||||
}
|
||||
|
||||
if resp.Token == "" {
|
||||
t.Error("Login() should return token when 2FA not required")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTwoFactorAuthenticator_Login_With2FA_NoCode(t *testing.T) {
|
||||
baseAuth := NewMockAuthenticator()
|
||||
provider := security.NewMemoryTwoFactorProvider(nil)
|
||||
tfaAuth := security.NewTwoFactorAuthenticator(baseAuth, provider, nil)
|
||||
|
||||
// Setup and enable 2FA
|
||||
secret, _ := tfaAuth.Setup2FA(1, "TestApp", "test@example.com")
|
||||
totp := security.NewTOTPGenerator(nil)
|
||||
code, _ := totp.GenerateCode(secret.Secret, time.Now())
|
||||
tfaAuth.Enable2FA(1, secret.Secret, code)
|
||||
|
||||
// Try to login without 2FA code
|
||||
req := security.LoginRequest{
|
||||
Username: "testuser",
|
||||
Password: "password",
|
||||
}
|
||||
|
||||
resp, err := tfaAuth.Login(context.Background(), req)
|
||||
if err != nil {
|
||||
t.Fatalf("Login() error = %v", err)
|
||||
}
|
||||
|
||||
if !resp.Requires2FA {
|
||||
t.Error("Login() should require 2FA when enabled")
|
||||
}
|
||||
|
||||
if resp.Token != "" {
|
||||
t.Error("Login() should not return token when 2FA required but not provided")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTwoFactorAuthenticator_Login_With2FA_ValidCode(t *testing.T) {
|
||||
baseAuth := NewMockAuthenticator()
|
||||
provider := security.NewMemoryTwoFactorProvider(nil)
|
||||
tfaAuth := security.NewTwoFactorAuthenticator(baseAuth, provider, nil)
|
||||
|
||||
// Setup and enable 2FA
|
||||
secret, _ := tfaAuth.Setup2FA(1, "TestApp", "test@example.com")
|
||||
totp := security.NewTOTPGenerator(nil)
|
||||
code, _ := totp.GenerateCode(secret.Secret, time.Now())
|
||||
tfaAuth.Enable2FA(1, secret.Secret, code)
|
||||
|
||||
// Generate new valid code for login
|
||||
newCode, _ := totp.GenerateCode(secret.Secret, time.Now())
|
||||
|
||||
// Login with 2FA code
|
||||
req := security.LoginRequest{
|
||||
Username: "testuser",
|
||||
Password: "password",
|
||||
TwoFactorCode: newCode,
|
||||
}
|
||||
|
||||
resp, err := tfaAuth.Login(context.Background(), req)
|
||||
if err != nil {
|
||||
t.Fatalf("Login() error = %v", err)
|
||||
}
|
||||
|
||||
if resp.Requires2FA {
|
||||
t.Error("Login() should not require 2FA when valid code provided")
|
||||
}
|
||||
|
||||
if resp.Token == "" {
|
||||
t.Error("Login() should return token when 2FA validated")
|
||||
}
|
||||
|
||||
if !resp.User.TwoFactorEnabled {
|
||||
t.Error("Login() should set TwoFactorEnabled on user")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTwoFactorAuthenticator_Login_With2FA_InvalidCode(t *testing.T) {
|
||||
baseAuth := NewMockAuthenticator()
|
||||
provider := security.NewMemoryTwoFactorProvider(nil)
|
||||
tfaAuth := security.NewTwoFactorAuthenticator(baseAuth, provider, nil)
|
||||
|
||||
// Setup and enable 2FA
|
||||
secret, _ := tfaAuth.Setup2FA(1, "TestApp", "test@example.com")
|
||||
totp := security.NewTOTPGenerator(nil)
|
||||
code, _ := totp.GenerateCode(secret.Secret, time.Now())
|
||||
tfaAuth.Enable2FA(1, secret.Secret, code)
|
||||
|
||||
// Try to login with invalid code
|
||||
req := security.LoginRequest{
|
||||
Username: "testuser",
|
||||
Password: "password",
|
||||
TwoFactorCode: "000000",
|
||||
}
|
||||
|
||||
_, err := tfaAuth.Login(context.Background(), req)
|
||||
if err == nil {
|
||||
t.Error("Login() should fail with invalid 2FA code")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTwoFactorAuthenticator_Login_WithBackupCode(t *testing.T) {
|
||||
baseAuth := NewMockAuthenticator()
|
||||
provider := security.NewMemoryTwoFactorProvider(nil)
|
||||
tfaAuth := security.NewTwoFactorAuthenticator(baseAuth, provider, nil)
|
||||
|
||||
// Setup and enable 2FA
|
||||
secret, _ := tfaAuth.Setup2FA(1, "TestApp", "test@example.com")
|
||||
totp := security.NewTOTPGenerator(nil)
|
||||
code, _ := totp.GenerateCode(secret.Secret, time.Now())
|
||||
tfaAuth.Enable2FA(1, secret.Secret, code)
|
||||
|
||||
// Get backup codes
|
||||
backupCodes, _ := tfaAuth.RegenerateBackupCodes(1, 10)
|
||||
|
||||
// Login with backup code
|
||||
req := security.LoginRequest{
|
||||
Username: "testuser",
|
||||
Password: "password",
|
||||
TwoFactorCode: backupCodes[0],
|
||||
}
|
||||
|
||||
resp, err := tfaAuth.Login(context.Background(), req)
|
||||
if err != nil {
|
||||
t.Fatalf("Login() with backup code error = %v", err)
|
||||
}
|
||||
|
||||
if resp.Token == "" {
|
||||
t.Error("Login() should return token when backup code validated")
|
||||
}
|
||||
|
||||
// Try to use same backup code again
|
||||
req2 := security.LoginRequest{
|
||||
Username: "testuser",
|
||||
Password: "password",
|
||||
TwoFactorCode: backupCodes[0],
|
||||
}
|
||||
|
||||
_, err = tfaAuth.Login(context.Background(), req2)
|
||||
if err == nil {
|
||||
t.Error("Login() should fail when reusing backup code")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTwoFactorAuthenticator_Disable2FA(t *testing.T) {
|
||||
baseAuth := NewMockAuthenticator()
|
||||
provider := security.NewMemoryTwoFactorProvider(nil)
|
||||
tfaAuth := security.NewTwoFactorAuthenticator(baseAuth, provider, nil)
|
||||
|
||||
// Setup and enable 2FA
|
||||
secret, _ := tfaAuth.Setup2FA(1, "TestApp", "test@example.com")
|
||||
totp := security.NewTOTPGenerator(nil)
|
||||
code, _ := totp.GenerateCode(secret.Secret, time.Now())
|
||||
tfaAuth.Enable2FA(1, secret.Secret, code)
|
||||
|
||||
// Disable 2FA
|
||||
err := tfaAuth.Disable2FA(1)
|
||||
if err != nil {
|
||||
t.Errorf("Disable2FA() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify 2FA is disabled
|
||||
status, _ := provider.Get2FAStatus(1)
|
||||
if status {
|
||||
t.Error("Disable2FA() did not disable 2FA")
|
||||
}
|
||||
|
||||
// Login should not require 2FA
|
||||
req := security.LoginRequest{
|
||||
Username: "testuser",
|
||||
Password: "password",
|
||||
}
|
||||
|
||||
resp, err := tfaAuth.Login(context.Background(), req)
|
||||
if err != nil {
|
||||
t.Fatalf("Login() error = %v", err)
|
||||
}
|
||||
|
||||
if resp.Requires2FA {
|
||||
t.Error("Login() should not require 2FA after disabling")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTwoFactorAuthenticator_RegenerateBackupCodes(t *testing.T) {
|
||||
baseAuth := NewMockAuthenticator()
|
||||
provider := security.NewMemoryTwoFactorProvider(nil)
|
||||
tfaAuth := security.NewTwoFactorAuthenticator(baseAuth, provider, nil)
|
||||
|
||||
// Setup and enable 2FA
|
||||
secret, _ := tfaAuth.Setup2FA(1, "TestApp", "test@example.com")
|
||||
totp := security.NewTOTPGenerator(nil)
|
||||
code, _ := totp.GenerateCode(secret.Secret, time.Now())
|
||||
tfaAuth.Enable2FA(1, secret.Secret, code)
|
||||
|
||||
// Get initial backup codes
|
||||
codes1, err := tfaAuth.RegenerateBackupCodes(1, 10)
|
||||
if err != nil {
|
||||
t.Fatalf("RegenerateBackupCodes() error = %v", err)
|
||||
}
|
||||
|
||||
if len(codes1) != 10 {
|
||||
t.Errorf("RegenerateBackupCodes() returned %d codes, want 10", len(codes1))
|
||||
}
|
||||
|
||||
// Regenerate backup codes
|
||||
codes2, err := tfaAuth.RegenerateBackupCodes(1, 10)
|
||||
if err != nil {
|
||||
t.Fatalf("RegenerateBackupCodes() error = %v", err)
|
||||
}
|
||||
|
||||
// Old codes should not work
|
||||
req := security.LoginRequest{
|
||||
Username: "testuser",
|
||||
Password: "password",
|
||||
TwoFactorCode: codes1[0],
|
||||
}
|
||||
|
||||
_, err = tfaAuth.Login(context.Background(), req)
|
||||
if err == nil {
|
||||
t.Error("Login() should fail with old backup code after regeneration")
|
||||
}
|
||||
|
||||
// New codes should work
|
||||
req2 := security.LoginRequest{
|
||||
Username: "testuser",
|
||||
Password: "password",
|
||||
TwoFactorCode: codes2[0],
|
||||
}
|
||||
|
||||
resp, err := tfaAuth.Login(context.Background(), req2)
|
||||
if err != nil {
|
||||
t.Fatalf("Login() with new backup code error = %v", err)
|
||||
}
|
||||
|
||||
if resp.Token == "" {
|
||||
t.Error("Login() should return token with new backup code")
|
||||
}
|
||||
}
|
||||
134
pkg/security/totp_middleware.go
Normal file
134
pkg/security/totp_middleware.go
Normal file
@@ -0,0 +1,134 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// TwoFactorAuthenticator wraps an Authenticator and adds 2FA support
|
||||
type TwoFactorAuthenticator struct {
|
||||
baseAuth Authenticator
|
||||
totp *TOTPGenerator
|
||||
provider TwoFactorAuthProvider
|
||||
}
|
||||
|
||||
// NewTwoFactorAuthenticator creates a new 2FA-enabled authenticator
|
||||
func NewTwoFactorAuthenticator(baseAuth Authenticator, provider TwoFactorAuthProvider, config *TwoFactorConfig) *TwoFactorAuthenticator {
|
||||
if config == nil {
|
||||
config = DefaultTwoFactorConfig()
|
||||
}
|
||||
return &TwoFactorAuthenticator{
|
||||
baseAuth: baseAuth,
|
||||
totp: NewTOTPGenerator(config),
|
||||
provider: provider,
|
||||
}
|
||||
}
|
||||
|
||||
// Login authenticates with 2FA support
|
||||
func (t *TwoFactorAuthenticator) Login(ctx context.Context, req LoginRequest) (*LoginResponse, error) {
|
||||
// First, perform standard authentication
|
||||
resp, err := t.baseAuth.Login(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check if user has 2FA enabled
|
||||
if resp.User == nil {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
has2FA, err := t.provider.Get2FAStatus(resp.User.UserID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check 2FA status: %w", err)
|
||||
}
|
||||
|
||||
if !has2FA {
|
||||
// User doesn't have 2FA enabled, return normal response
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// User has 2FA enabled
|
||||
if req.TwoFactorCode == "" {
|
||||
// No 2FA code provided, require it
|
||||
resp.Requires2FA = true
|
||||
resp.Token = "" // Don't return token until 2FA is verified
|
||||
resp.RefreshToken = ""
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Validate 2FA code
|
||||
secret, err := t.provider.Get2FASecret(resp.User.UserID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get 2FA secret: %w", err)
|
||||
}
|
||||
|
||||
// Try TOTP code first
|
||||
valid, err := t.totp.ValidateCode(secret, req.TwoFactorCode)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to validate 2FA code: %w", err)
|
||||
}
|
||||
|
||||
if !valid {
|
||||
// Try backup code
|
||||
valid, err = t.provider.ValidateBackupCode(resp.User.UserID, req.TwoFactorCode)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to validate backup code: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if !valid {
|
||||
return nil, fmt.Errorf("invalid 2FA code")
|
||||
}
|
||||
|
||||
// 2FA verified, return full response with token
|
||||
resp.User.TwoFactorEnabled = true
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Logout delegates to base authenticator
|
||||
func (t *TwoFactorAuthenticator) Logout(ctx context.Context, req LogoutRequest) error {
|
||||
return t.baseAuth.Logout(ctx, req)
|
||||
}
|
||||
|
||||
// Authenticate delegates to base authenticator
|
||||
func (t *TwoFactorAuthenticator) Authenticate(r *http.Request) (*UserContext, error) {
|
||||
return t.baseAuth.Authenticate(r)
|
||||
}
|
||||
|
||||
// Setup2FA initiates 2FA setup for a user
|
||||
func (t *TwoFactorAuthenticator) Setup2FA(userID int, issuer, accountName string) (*TwoFactorSecret, error) {
|
||||
return t.provider.Generate2FASecret(userID, issuer, accountName)
|
||||
}
|
||||
|
||||
// Enable2FA completes 2FA setup after user confirms with a valid code
|
||||
func (t *TwoFactorAuthenticator) Enable2FA(userID int, secret, verificationCode string) error {
|
||||
// Verify the code before enabling
|
||||
valid, err := t.totp.ValidateCode(secret, verificationCode)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to validate code: %w", err)
|
||||
}
|
||||
|
||||
if !valid {
|
||||
return fmt.Errorf("invalid verification code")
|
||||
}
|
||||
|
||||
// Generate backup codes
|
||||
backupCodes, err := t.provider.GenerateBackupCodes(userID, 10)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate backup codes: %w", err)
|
||||
}
|
||||
|
||||
// Enable 2FA
|
||||
return t.provider.Enable2FA(userID, secret, backupCodes)
|
||||
}
|
||||
|
||||
// Disable2FA removes 2FA from a user account
|
||||
func (t *TwoFactorAuthenticator) Disable2FA(userID int) error {
|
||||
return t.provider.Disable2FA(userID)
|
||||
}
|
||||
|
||||
// RegenerateBackupCodes creates new backup codes for a user
|
||||
func (t *TwoFactorAuthenticator) RegenerateBackupCodes(userID int, count int) ([]string, error) {
|
||||
return t.provider.GenerateBackupCodes(userID, count)
|
||||
}
|
||||
229
pkg/security/totp_provider_database.go
Normal file
229
pkg/security/totp_provider_database.go
Normal file
@@ -0,0 +1,229 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// DatabaseTwoFactorProvider implements TwoFactorAuthProvider using PostgreSQL stored procedures
|
||||
// Procedure names are configurable via SQLNames (see DefaultSQLNames for defaults)
|
||||
// See totp_database_schema.sql for procedure definitions
|
||||
type DatabaseTwoFactorProvider struct {
|
||||
db *sql.DB
|
||||
totpGen *TOTPGenerator
|
||||
sqlNames *SQLNames
|
||||
}
|
||||
|
||||
// NewDatabaseTwoFactorProvider creates a new database-backed 2FA provider
|
||||
func NewDatabaseTwoFactorProvider(db *sql.DB, config *TwoFactorConfig, names ...*SQLNames) *DatabaseTwoFactorProvider {
|
||||
if config == nil {
|
||||
config = DefaultTwoFactorConfig()
|
||||
}
|
||||
return &DatabaseTwoFactorProvider{
|
||||
db: db,
|
||||
totpGen: NewTOTPGenerator(config),
|
||||
sqlNames: resolveSQLNames(names...),
|
||||
}
|
||||
}
|
||||
|
||||
// Generate2FASecret creates a new secret for a user
|
||||
func (p *DatabaseTwoFactorProvider) Generate2FASecret(userID int, issuer, accountName string) (*TwoFactorSecret, error) {
|
||||
secret, err := p.totpGen.GenerateSecret()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate secret: %w", err)
|
||||
}
|
||||
|
||||
qrURL := p.totpGen.GenerateQRCodeURL(secret, issuer, accountName)
|
||||
|
||||
backupCodes, err := GenerateBackupCodes(10)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate backup codes: %w", err)
|
||||
}
|
||||
|
||||
return &TwoFactorSecret{
|
||||
Secret: secret,
|
||||
QRCodeURL: qrURL,
|
||||
BackupCodes: backupCodes,
|
||||
Issuer: issuer,
|
||||
AccountName: accountName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Validate2FACode verifies a TOTP code
|
||||
func (p *DatabaseTwoFactorProvider) Validate2FACode(secret string, code string) (bool, error) {
|
||||
return p.totpGen.ValidateCode(secret, code)
|
||||
}
|
||||
|
||||
// Enable2FA activates 2FA for a user
|
||||
func (p *DatabaseTwoFactorProvider) Enable2FA(userID int, secret string, backupCodes []string) error {
|
||||
// Hash backup codes for secure storage
|
||||
hashedCodes := make([]string, len(backupCodes))
|
||||
for i, code := range backupCodes {
|
||||
hash := sha256.Sum256([]byte(code))
|
||||
hashedCodes[i] = hex.EncodeToString(hash[:])
|
||||
}
|
||||
|
||||
// Convert to JSON array
|
||||
codesJSON, err := json.Marshal(hashedCodes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal backup codes: %w", err)
|
||||
}
|
||||
|
||||
// Call stored procedure
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error FROM %s($1, $2, $3::jsonb)`, p.sqlNames.TOTPEnable)
|
||||
err = p.db.QueryRow(query, userID, secret, string(codesJSON)).Scan(&success, &errorMsg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("enable 2FA query failed: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errorMsg.Valid {
|
||||
return fmt.Errorf("%s", errorMsg.String)
|
||||
}
|
||||
return fmt.Errorf("failed to enable 2FA")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Disable2FA deactivates 2FA for a user
|
||||
func (p *DatabaseTwoFactorProvider) Disable2FA(userID int) error {
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error FROM %s($1)`, p.sqlNames.TOTPDisable)
|
||||
err := p.db.QueryRow(query, userID).Scan(&success, &errorMsg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("disable 2FA query failed: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errorMsg.Valid {
|
||||
return fmt.Errorf("%s", errorMsg.String)
|
||||
}
|
||||
return fmt.Errorf("failed to disable 2FA")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get2FAStatus checks if user has 2FA enabled
|
||||
func (p *DatabaseTwoFactorProvider) Get2FAStatus(userID int) (bool, error) {
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var enabled bool
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_enabled FROM %s($1)`, p.sqlNames.TOTPGetStatus)
|
||||
err := p.db.QueryRow(query, userID).Scan(&success, &errorMsg, &enabled)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("get 2FA status query failed: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errorMsg.Valid {
|
||||
return false, fmt.Errorf("%s", errorMsg.String)
|
||||
}
|
||||
return false, fmt.Errorf("failed to get 2FA status")
|
||||
}
|
||||
|
||||
return enabled, nil
|
||||
}
|
||||
|
||||
// Get2FASecret retrieves the user's 2FA secret
|
||||
func (p *DatabaseTwoFactorProvider) Get2FASecret(userID int) (string, error) {
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var secret sql.NullString
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_secret FROM %s($1)`, p.sqlNames.TOTPGetSecret)
|
||||
err := p.db.QueryRow(query, userID).Scan(&success, &errorMsg, &secret)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("get 2FA secret query failed: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errorMsg.Valid {
|
||||
return "", fmt.Errorf("%s", errorMsg.String)
|
||||
}
|
||||
return "", fmt.Errorf("failed to get 2FA secret")
|
||||
}
|
||||
|
||||
if !secret.Valid {
|
||||
return "", fmt.Errorf("2FA secret not found")
|
||||
}
|
||||
|
||||
return secret.String, nil
|
||||
}
|
||||
|
||||
// GenerateBackupCodes creates backup codes for 2FA
|
||||
func (p *DatabaseTwoFactorProvider) GenerateBackupCodes(userID int, count int) ([]string, error) {
|
||||
codes, err := GenerateBackupCodes(count)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate backup codes: %w", err)
|
||||
}
|
||||
|
||||
// Hash backup codes for storage
|
||||
hashedCodes := make([]string, len(codes))
|
||||
for i, code := range codes {
|
||||
hash := sha256.Sum256([]byte(code))
|
||||
hashedCodes[i] = hex.EncodeToString(hash[:])
|
||||
}
|
||||
|
||||
// Convert to JSON array
|
||||
codesJSON, err := json.Marshal(hashedCodes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal backup codes: %w", err)
|
||||
}
|
||||
|
||||
// Call stored procedure
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error FROM %s($1, $2::jsonb)`, p.sqlNames.TOTPRegenerateBackup)
|
||||
err = p.db.QueryRow(query, userID, string(codesJSON)).Scan(&success, &errorMsg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("regenerate backup codes query failed: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errorMsg.Valid {
|
||||
return nil, fmt.Errorf("%s", errorMsg.String)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to regenerate backup codes")
|
||||
}
|
||||
|
||||
// Return unhashed codes to user (only time they see them)
|
||||
return codes, nil
|
||||
}
|
||||
|
||||
// ValidateBackupCode checks and consumes a backup code
|
||||
func (p *DatabaseTwoFactorProvider) ValidateBackupCode(userID int, code string) (bool, error) {
|
||||
// Hash the code
|
||||
hash := sha256.Sum256([]byte(code))
|
||||
codeHash := hex.EncodeToString(hash[:])
|
||||
|
||||
var success bool
|
||||
var errorMsg sql.NullString
|
||||
var valid bool
|
||||
|
||||
query := fmt.Sprintf(`SELECT p_success, p_error, p_valid FROM %s($1, $2)`, p.sqlNames.TOTPValidateBackupCode)
|
||||
err := p.db.QueryRow(query, userID, codeHash).Scan(&success, &errorMsg, &valid)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("validate backup code query failed: %w", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
if errorMsg.Valid {
|
||||
return false, fmt.Errorf("%s", errorMsg.String)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return valid, nil
|
||||
}
|
||||
218
pkg/security/totp_provider_database_test.go
Normal file
218
pkg/security/totp_provider_database_test.go
Normal file
@@ -0,0 +1,218 @@
|
||||
package security_test
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/security"
|
||||
)
|
||||
|
||||
// Note: These tests require a PostgreSQL database with the schema from totp_database_schema.sql
|
||||
// Set TEST_DATABASE_URL environment variable or skip tests
|
||||
|
||||
func setupTestDB(t *testing.T) *sql.DB {
|
||||
// Skip if no test database configured
|
||||
t.Skip("Database tests require TEST_DATABASE_URL environment variable")
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestDatabaseTwoFactorProvider_Enable2FA(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
if db == nil {
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
provider := security.NewDatabaseTwoFactorProvider(db, nil)
|
||||
|
||||
// Generate secret and backup codes
|
||||
secret, err := provider.Generate2FASecret(1, "TestApp", "test@example.com")
|
||||
if err != nil {
|
||||
t.Fatalf("Generate2FASecret() error = %v", err)
|
||||
}
|
||||
|
||||
// Enable 2FA
|
||||
err = provider.Enable2FA(1, secret.Secret, secret.BackupCodes)
|
||||
if err != nil {
|
||||
t.Errorf("Enable2FA() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify enabled
|
||||
enabled, err := provider.Get2FAStatus(1)
|
||||
if err != nil {
|
||||
t.Fatalf("Get2FAStatus() error = %v", err)
|
||||
}
|
||||
|
||||
if !enabled {
|
||||
t.Error("Get2FAStatus() = false, want true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDatabaseTwoFactorProvider_Disable2FA(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
if db == nil {
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
provider := security.NewDatabaseTwoFactorProvider(db, nil)
|
||||
|
||||
// Enable first
|
||||
secret, _ := provider.Generate2FASecret(1, "TestApp", "test@example.com")
|
||||
provider.Enable2FA(1, secret.Secret, secret.BackupCodes)
|
||||
|
||||
// Disable
|
||||
err := provider.Disable2FA(1)
|
||||
if err != nil {
|
||||
t.Errorf("Disable2FA() error = %v", err)
|
||||
}
|
||||
|
||||
// Verify disabled
|
||||
enabled, err := provider.Get2FAStatus(1)
|
||||
if err != nil {
|
||||
t.Fatalf("Get2FAStatus() error = %v", err)
|
||||
}
|
||||
|
||||
if enabled {
|
||||
t.Error("Get2FAStatus() = true, want false")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDatabaseTwoFactorProvider_GetSecret(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
if db == nil {
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
provider := security.NewDatabaseTwoFactorProvider(db, nil)
|
||||
|
||||
// Enable 2FA
|
||||
secret, _ := provider.Generate2FASecret(1, "TestApp", "test@example.com")
|
||||
provider.Enable2FA(1, secret.Secret, secret.BackupCodes)
|
||||
|
||||
// Retrieve secret
|
||||
retrieved, err := provider.Get2FASecret(1)
|
||||
if err != nil {
|
||||
t.Errorf("Get2FASecret() error = %v", err)
|
||||
}
|
||||
|
||||
if retrieved != secret.Secret {
|
||||
t.Errorf("Get2FASecret() = %v, want %v", retrieved, secret.Secret)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDatabaseTwoFactorProvider_ValidateBackupCode(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
if db == nil {
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
provider := security.NewDatabaseTwoFactorProvider(db, nil)
|
||||
|
||||
// Enable 2FA
|
||||
secret, _ := provider.Generate2FASecret(1, "TestApp", "test@example.com")
|
||||
provider.Enable2FA(1, secret.Secret, secret.BackupCodes)
|
||||
|
||||
// Validate backup code
|
||||
valid, err := provider.ValidateBackupCode(1, secret.BackupCodes[0])
|
||||
if err != nil {
|
||||
t.Errorf("ValidateBackupCode() error = %v", err)
|
||||
}
|
||||
|
||||
if !valid {
|
||||
t.Error("ValidateBackupCode() = false, want true")
|
||||
}
|
||||
|
||||
// Try to use same code again
|
||||
valid, err = provider.ValidateBackupCode(1, secret.BackupCodes[0])
|
||||
if err == nil {
|
||||
t.Error("ValidateBackupCode() should error on reuse")
|
||||
}
|
||||
|
||||
// Try invalid code
|
||||
valid, err = provider.ValidateBackupCode(1, "INVALID")
|
||||
if err != nil {
|
||||
t.Errorf("ValidateBackupCode() error = %v", err)
|
||||
}
|
||||
|
||||
if valid {
|
||||
t.Error("ValidateBackupCode() = true for invalid code")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDatabaseTwoFactorProvider_RegenerateBackupCodes(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
if db == nil {
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
provider := security.NewDatabaseTwoFactorProvider(db, nil)
|
||||
|
||||
// Enable 2FA
|
||||
secret, _ := provider.Generate2FASecret(1, "TestApp", "test@example.com")
|
||||
provider.Enable2FA(1, secret.Secret, secret.BackupCodes)
|
||||
|
||||
// Regenerate codes
|
||||
newCodes, err := provider.GenerateBackupCodes(1, 10)
|
||||
if err != nil {
|
||||
t.Errorf("GenerateBackupCodes() error = %v", err)
|
||||
}
|
||||
|
||||
if len(newCodes) != 10 {
|
||||
t.Errorf("GenerateBackupCodes() returned %d codes, want 10", len(newCodes))
|
||||
}
|
||||
|
||||
// Old codes should not work
|
||||
valid, _ := provider.ValidateBackupCode(1, secret.BackupCodes[0])
|
||||
if valid {
|
||||
t.Error("Old backup code should not work after regeneration")
|
||||
}
|
||||
|
||||
// New codes should work
|
||||
valid, err = provider.ValidateBackupCode(1, newCodes[0])
|
||||
if err != nil {
|
||||
t.Errorf("ValidateBackupCode() error = %v", err)
|
||||
}
|
||||
|
||||
if !valid {
|
||||
t.Error("ValidateBackupCode() = false for new code")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDatabaseTwoFactorProvider_Generate2FASecret(t *testing.T) {
|
||||
db := setupTestDB(t)
|
||||
if db == nil {
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
provider := security.NewDatabaseTwoFactorProvider(db, nil)
|
||||
|
||||
secret, err := provider.Generate2FASecret(1, "TestApp", "test@example.com")
|
||||
if err != nil {
|
||||
t.Fatalf("Generate2FASecret() error = %v", err)
|
||||
}
|
||||
|
||||
if secret.Secret == "" {
|
||||
t.Error("Generate2FASecret() returned empty secret")
|
||||
}
|
||||
|
||||
if secret.QRCodeURL == "" {
|
||||
t.Error("Generate2FASecret() returned empty QR code URL")
|
||||
}
|
||||
|
||||
if len(secret.BackupCodes) != 10 {
|
||||
t.Errorf("Generate2FASecret() returned %d backup codes, want 10", len(secret.BackupCodes))
|
||||
}
|
||||
|
||||
if secret.Issuer != "TestApp" {
|
||||
t.Errorf("Generate2FASecret() Issuer = %v, want TestApp", secret.Issuer)
|
||||
}
|
||||
|
||||
if secret.AccountName != "test@example.com" {
|
||||
t.Errorf("Generate2FASecret() AccountName = %v, want test@example.com", secret.AccountName)
|
||||
}
|
||||
}
|
||||
156
pkg/security/totp_provider_memory.go
Normal file
156
pkg/security/totp_provider_memory.go
Normal file
@@ -0,0 +1,156 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// MemoryTwoFactorProvider is an in-memory implementation of TwoFactorAuthProvider for testing/examples
|
||||
type MemoryTwoFactorProvider struct {
|
||||
mu sync.RWMutex
|
||||
secrets map[int]string // userID -> secret
|
||||
backupCodes map[int]map[string]bool // userID -> backup codes (code -> used)
|
||||
totpGen *TOTPGenerator
|
||||
}
|
||||
|
||||
// NewMemoryTwoFactorProvider creates a new in-memory 2FA provider
|
||||
func NewMemoryTwoFactorProvider(config *TwoFactorConfig) *MemoryTwoFactorProvider {
|
||||
if config == nil {
|
||||
config = DefaultTwoFactorConfig()
|
||||
}
|
||||
return &MemoryTwoFactorProvider{
|
||||
secrets: make(map[int]string),
|
||||
backupCodes: make(map[int]map[string]bool),
|
||||
totpGen: NewTOTPGenerator(config),
|
||||
}
|
||||
}
|
||||
|
||||
// Generate2FASecret creates a new secret for a user
|
||||
func (m *MemoryTwoFactorProvider) Generate2FASecret(userID int, issuer, accountName string) (*TwoFactorSecret, error) {
|
||||
secret, err := m.totpGen.GenerateSecret()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
qrURL := m.totpGen.GenerateQRCodeURL(secret, issuer, accountName)
|
||||
|
||||
backupCodes, err := GenerateBackupCodes(10)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &TwoFactorSecret{
|
||||
Secret: secret,
|
||||
QRCodeURL: qrURL,
|
||||
BackupCodes: backupCodes,
|
||||
Issuer: issuer,
|
||||
AccountName: accountName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Validate2FACode verifies a TOTP code
|
||||
func (m *MemoryTwoFactorProvider) Validate2FACode(secret string, code string) (bool, error) {
|
||||
return m.totpGen.ValidateCode(secret, code)
|
||||
}
|
||||
|
||||
// Enable2FA activates 2FA for a user
|
||||
func (m *MemoryTwoFactorProvider) Enable2FA(userID int, secret string, backupCodes []string) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
m.secrets[userID] = secret
|
||||
|
||||
// Store backup codes
|
||||
if m.backupCodes[userID] == nil {
|
||||
m.backupCodes[userID] = make(map[string]bool)
|
||||
}
|
||||
|
||||
for _, code := range backupCodes {
|
||||
// Hash backup codes for security
|
||||
hash := sha256.Sum256([]byte(code))
|
||||
m.backupCodes[userID][hex.EncodeToString(hash[:])] = false
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Disable2FA deactivates 2FA for a user
|
||||
func (m *MemoryTwoFactorProvider) Disable2FA(userID int) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
delete(m.secrets, userID)
|
||||
delete(m.backupCodes, userID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get2FAStatus checks if user has 2FA enabled
|
||||
func (m *MemoryTwoFactorProvider) Get2FAStatus(userID int) (bool, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
_, exists := m.secrets[userID]
|
||||
return exists, nil
|
||||
}
|
||||
|
||||
// Get2FASecret retrieves the user's 2FA secret
|
||||
func (m *MemoryTwoFactorProvider) Get2FASecret(userID int) (string, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
secret, exists := m.secrets[userID]
|
||||
if !exists {
|
||||
return "", fmt.Errorf("user does not have 2FA enabled")
|
||||
}
|
||||
return secret, nil
|
||||
}
|
||||
|
||||
// GenerateBackupCodes creates backup codes for 2FA
|
||||
func (m *MemoryTwoFactorProvider) GenerateBackupCodes(userID int, count int) ([]string, error) {
|
||||
codes, err := GenerateBackupCodes(count)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
// Clear old backup codes and store new ones
|
||||
m.backupCodes[userID] = make(map[string]bool)
|
||||
for _, code := range codes {
|
||||
hash := sha256.Sum256([]byte(code))
|
||||
m.backupCodes[userID][hex.EncodeToString(hash[:])] = false
|
||||
}
|
||||
|
||||
return codes, nil
|
||||
}
|
||||
|
||||
// ValidateBackupCode checks and consumes a backup code
|
||||
func (m *MemoryTwoFactorProvider) ValidateBackupCode(userID int, code string) (bool, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
userCodes, exists := m.backupCodes[userID]
|
||||
if !exists {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Hash the provided code
|
||||
hash := sha256.Sum256([]byte(code))
|
||||
hashStr := hex.EncodeToString(hash[:])
|
||||
|
||||
used, exists := userCodes[hashStr]
|
||||
if !exists {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if used {
|
||||
return false, fmt.Errorf("backup code already used")
|
||||
}
|
||||
|
||||
// Mark as used
|
||||
userCodes[hashStr] = true
|
||||
return true, nil
|
||||
}
|
||||
292
pkg/security/totp_test.go
Normal file
292
pkg/security/totp_test.go
Normal file
@@ -0,0 +1,292 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestTOTPGenerator_GenerateSecret(t *testing.T) {
|
||||
totp := NewTOTPGenerator(nil)
|
||||
|
||||
secret, err := totp.GenerateSecret()
|
||||
if err != nil {
|
||||
t.Fatalf("GenerateSecret() error = %v", err)
|
||||
}
|
||||
|
||||
if secret == "" {
|
||||
t.Error("GenerateSecret() returned empty secret")
|
||||
}
|
||||
|
||||
// Secret should be base32 encoded
|
||||
if len(secret) < 16 {
|
||||
t.Error("GenerateSecret() returned secret that is too short")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTOTPGenerator_GenerateQRCodeURL(t *testing.T) {
|
||||
totp := NewTOTPGenerator(nil)
|
||||
|
||||
secret := "JBSWY3DPEHPK3PXP"
|
||||
issuer := "TestApp"
|
||||
accountName := "user@example.com"
|
||||
|
||||
url := totp.GenerateQRCodeURL(secret, issuer, accountName)
|
||||
|
||||
if !strings.HasPrefix(url, "otpauth://totp/") {
|
||||
t.Errorf("GenerateQRCodeURL() = %v, want otpauth://totp/ prefix", url)
|
||||
}
|
||||
|
||||
if !strings.Contains(url, "secret="+secret) {
|
||||
t.Errorf("GenerateQRCodeURL() missing secret parameter")
|
||||
}
|
||||
|
||||
if !strings.Contains(url, "issuer="+issuer) {
|
||||
t.Errorf("GenerateQRCodeURL() missing issuer parameter")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTOTPGenerator_GenerateCode(t *testing.T) {
|
||||
config := &TwoFactorConfig{
|
||||
Algorithm: "SHA1",
|
||||
Digits: 6,
|
||||
Period: 30,
|
||||
SkewWindow: 1,
|
||||
}
|
||||
totp := NewTOTPGenerator(config)
|
||||
|
||||
secret := "JBSWY3DPEHPK3PXP"
|
||||
|
||||
// Test with known time
|
||||
timestamp := time.Unix(1234567890, 0)
|
||||
code, err := totp.GenerateCode(secret, timestamp)
|
||||
if err != nil {
|
||||
t.Fatalf("GenerateCode() error = %v", err)
|
||||
}
|
||||
|
||||
if len(code) != 6 {
|
||||
t.Errorf("GenerateCode() returned code with length %d, want 6", len(code))
|
||||
}
|
||||
|
||||
// Code should be numeric
|
||||
for _, c := range code {
|
||||
if c < '0' || c > '9' {
|
||||
t.Errorf("GenerateCode() returned non-numeric code: %s", code)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTOTPGenerator_ValidateCode(t *testing.T) {
|
||||
config := &TwoFactorConfig{
|
||||
Algorithm: "SHA1",
|
||||
Digits: 6,
|
||||
Period: 30,
|
||||
SkewWindow: 1,
|
||||
}
|
||||
totp := NewTOTPGenerator(config)
|
||||
|
||||
secret := "JBSWY3DPEHPK3PXP"
|
||||
|
||||
// Generate a code for current time
|
||||
now := time.Now()
|
||||
code, err := totp.GenerateCode(secret, now)
|
||||
if err != nil {
|
||||
t.Fatalf("GenerateCode() error = %v", err)
|
||||
}
|
||||
|
||||
// Validate the code
|
||||
valid, err := totp.ValidateCode(secret, code)
|
||||
if err != nil {
|
||||
t.Fatalf("ValidateCode() error = %v", err)
|
||||
}
|
||||
|
||||
if !valid {
|
||||
t.Error("ValidateCode() = false, want true for current code")
|
||||
}
|
||||
|
||||
// Test with invalid code
|
||||
valid, err = totp.ValidateCode(secret, "000000")
|
||||
if err != nil {
|
||||
t.Fatalf("ValidateCode() error = %v", err)
|
||||
}
|
||||
|
||||
// This might occasionally pass if 000000 is the correct code, but very unlikely
|
||||
if valid && code != "000000" {
|
||||
t.Error("ValidateCode() = true for invalid code")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTOTPGenerator_ValidateCode_WithSkew(t *testing.T) {
|
||||
config := &TwoFactorConfig{
|
||||
Algorithm: "SHA1",
|
||||
Digits: 6,
|
||||
Period: 30,
|
||||
SkewWindow: 2, // Allow 2 periods before/after
|
||||
}
|
||||
totp := NewTOTPGenerator(config)
|
||||
|
||||
secret := "JBSWY3DPEHPK3PXP"
|
||||
|
||||
// Generate code for 1 period ago
|
||||
past := time.Now().Add(-30 * time.Second)
|
||||
code, err := totp.GenerateCode(secret, past)
|
||||
if err != nil {
|
||||
t.Fatalf("GenerateCode() error = %v", err)
|
||||
}
|
||||
|
||||
// Should still validate with skew window
|
||||
valid, err := totp.ValidateCode(secret, code)
|
||||
if err != nil {
|
||||
t.Fatalf("ValidateCode() error = %v", err)
|
||||
}
|
||||
|
||||
if !valid {
|
||||
t.Error("ValidateCode() = false, want true for code within skew window")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTOTPGenerator_DifferentAlgorithms(t *testing.T) {
|
||||
algorithms := []string{"SHA1", "SHA256", "SHA512"}
|
||||
secret := "JBSWY3DPEHPK3PXP"
|
||||
|
||||
for _, algo := range algorithms {
|
||||
t.Run(algo, func(t *testing.T) {
|
||||
config := &TwoFactorConfig{
|
||||
Algorithm: algo,
|
||||
Digits: 6,
|
||||
Period: 30,
|
||||
SkewWindow: 1,
|
||||
}
|
||||
totp := NewTOTPGenerator(config)
|
||||
|
||||
code, err := totp.GenerateCode(secret, time.Now())
|
||||
if err != nil {
|
||||
t.Fatalf("GenerateCode() with %s error = %v", algo, err)
|
||||
}
|
||||
|
||||
valid, err := totp.ValidateCode(secret, code)
|
||||
if err != nil {
|
||||
t.Fatalf("ValidateCode() with %s error = %v", algo, err)
|
||||
}
|
||||
|
||||
if !valid {
|
||||
t.Errorf("ValidateCode() with %s = false, want true", algo)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTOTPGenerator_8Digits(t *testing.T) {
|
||||
config := &TwoFactorConfig{
|
||||
Algorithm: "SHA1",
|
||||
Digits: 8,
|
||||
Period: 30,
|
||||
SkewWindow: 1,
|
||||
}
|
||||
totp := NewTOTPGenerator(config)
|
||||
|
||||
secret := "JBSWY3DPEHPK3PXP"
|
||||
|
||||
code, err := totp.GenerateCode(secret, time.Now())
|
||||
if err != nil {
|
||||
t.Fatalf("GenerateCode() error = %v", err)
|
||||
}
|
||||
|
||||
if len(code) != 8 {
|
||||
t.Errorf("GenerateCode() returned code with length %d, want 8", len(code))
|
||||
}
|
||||
|
||||
valid, err := totp.ValidateCode(secret, code)
|
||||
if err != nil {
|
||||
t.Fatalf("ValidateCode() error = %v", err)
|
||||
}
|
||||
|
||||
if !valid {
|
||||
t.Error("ValidateCode() = false, want true for 8-digit code")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateBackupCodes(t *testing.T) {
|
||||
count := 10
|
||||
codes, err := GenerateBackupCodes(count)
|
||||
if err != nil {
|
||||
t.Fatalf("GenerateBackupCodes() error = %v", err)
|
||||
}
|
||||
|
||||
if len(codes) != count {
|
||||
t.Errorf("GenerateBackupCodes() returned %d codes, want %d", len(codes), count)
|
||||
}
|
||||
|
||||
// Check uniqueness
|
||||
seen := make(map[string]bool)
|
||||
for _, code := range codes {
|
||||
if seen[code] {
|
||||
t.Errorf("GenerateBackupCodes() generated duplicate code: %s", code)
|
||||
}
|
||||
seen[code] = true
|
||||
|
||||
// Check format (8 hex characters)
|
||||
if len(code) != 8 {
|
||||
t.Errorf("GenerateBackupCodes() code length = %d, want 8", len(code))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultTwoFactorConfig(t *testing.T) {
|
||||
config := DefaultTwoFactorConfig()
|
||||
|
||||
if config.Algorithm != "SHA1" {
|
||||
t.Errorf("DefaultTwoFactorConfig() Algorithm = %s, want SHA1", config.Algorithm)
|
||||
}
|
||||
|
||||
if config.Digits != 6 {
|
||||
t.Errorf("DefaultTwoFactorConfig() Digits = %d, want 6", config.Digits)
|
||||
}
|
||||
|
||||
if config.Period != 30 {
|
||||
t.Errorf("DefaultTwoFactorConfig() Period = %d, want 30", config.Period)
|
||||
}
|
||||
|
||||
if config.SkewWindow != 1 {
|
||||
t.Errorf("DefaultTwoFactorConfig() SkewWindow = %d, want 1", config.SkewWindow)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTOTPGenerator_InvalidSecret(t *testing.T) {
|
||||
totp := NewTOTPGenerator(nil)
|
||||
|
||||
// Test with invalid base32 secret
|
||||
_, err := totp.GenerateCode("INVALID!!!", time.Now())
|
||||
if err == nil {
|
||||
t.Error("GenerateCode() with invalid secret should return error")
|
||||
}
|
||||
|
||||
_, err = totp.ValidateCode("INVALID!!!", "123456")
|
||||
if err == nil {
|
||||
t.Error("ValidateCode() with invalid secret should return error")
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmark tests
|
||||
func BenchmarkTOTPGenerator_GenerateCode(b *testing.B) {
|
||||
totp := NewTOTPGenerator(nil)
|
||||
secret := "JBSWY3DPEHPK3PXP"
|
||||
now := time.Now()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _ = totp.GenerateCode(secret, now)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkTOTPGenerator_ValidateCode(b *testing.B) {
|
||||
totp := NewTOTPGenerator(nil)
|
||||
secret := "JBSWY3DPEHPK3PXP"
|
||||
code, _ := totp.GenerateCode(secret, time.Now())
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _ = totp.ValidateCode(secret, code)
|
||||
}
|
||||
}
|
||||
@@ -98,6 +98,7 @@ func (p *EmbedFSProvider) Open(name string) (fs.File, error) {
|
||||
|
||||
// Apply prefix stripping by prepending the prefix to the requested path
|
||||
actualPath := name
|
||||
alternatePath := ""
|
||||
if p.stripPrefix != "" {
|
||||
// Clean the paths to handle leading/trailing slashes
|
||||
prefix := strings.Trim(p.stripPrefix, "/")
|
||||
@@ -105,12 +106,25 @@ func (p *EmbedFSProvider) Open(name string) (fs.File, error) {
|
||||
|
||||
if prefix != "" {
|
||||
actualPath = path.Join(prefix, cleanName)
|
||||
alternatePath = cleanName
|
||||
} else {
|
||||
actualPath = cleanName
|
||||
}
|
||||
}
|
||||
// First try the actual path with prefix
|
||||
if file, err := p.fs.Open(actualPath); err == nil {
|
||||
return file, nil
|
||||
}
|
||||
|
||||
return p.fs.Open(actualPath)
|
||||
// If alternate path is different, try it as well
|
||||
if alternatePath != "" && alternatePath != actualPath {
|
||||
if file, err := p.fs.Open(alternatePath); err == nil {
|
||||
return file, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If both attempts fail, return the error from the first attempt
|
||||
return nil, fmt.Errorf("file not found: %s", name)
|
||||
}
|
||||
|
||||
// Close releases any resources held by the provider.
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user