Compare commits

...

10 Commits

Author SHA1 Message Date
Hein
e70bab92d7 feat(tests): 🎉 More tests for preload fixes.
* Implement tests for SanitizeWhereClause and AddTablePrefixToColumns.
* Ensure correct handling of table prefixes in WHERE clauses.
* Validate that unqualified columns are prefixed correctly when necessary.
* Add tests for XFiles processing to verify table name handling.
* Introduce tests for recursive preloads and their related keys.
2026-01-30 10:09:59 +02:00
Hein
fc8f44e3e8 feat(preload): Enhance recursive preload functionality
* Increase maximum recursion depth from 4 to 8.
* Generate FK-based relation names for child preloads using RelatedKey.
* Clear WHERE clause for recursive preloads to prevent filtering issues.
* Extend child relations to recursive levels for better data retrieval.
* Add integration tests to validate recursive preload behavior and structure.
2026-01-29 15:31:50 +02:00
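A minimal sketch of how the recursive preload described in the commit above might be requested. It assumes the `PreloadOption` fields visible later in this compare (`Recursive`, `RelatedKey`, `RecursiveChildKey`, `TableName`); the relation and key names are illustrative and the import path is a placeholder, not the repository's real module path.

```go
package main

import common "example.local/pkg/common" // placeholder import path for this repo's common package

func main() {
	// The handler (applyPreloadWithRecursion) expands a Recursive preload up to
	// the new maximum depth of 8, derives FK-based child relation names from
	// RecursiveChildKey (falling back to RelatedKey), and clears the WHERE
	// clause on recursive levels so the ORM can follow the foreign key.
	_ = common.RequestOptions{
		Preload: []common.PreloadOption{{
			Relation:          "MastertaskItems",          // self-referencing relation
			TableName:         "mastertaskitem",           // actual database table
			Recursive:         true,                       // expand children recursively
			RelatedKey:        "rid_mastertask",           // FK in child referencing parent
			RecursiveChildKey: "rid_parentmastertaskitem", // FK used for self-recursion
		}},
	}
}
```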
Hein
584bb9813d .. 2026-01-29 09:37:22 +02:00
Hein
17239d1611 feat(preload): Add support for custom SQL joins
* Introduce SqlJoins and JoinAliases in PreloadOption.
* Preserve SqlJoins and JoinAliases during filter processing.
* Implement logic to apply custom SQL joins in handler.
* Add tests for SqlJoins handling and join alias extraction.
2026-01-29 09:37:09 +02:00
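A hedged sketch of the custom-join support added here, using the `SqlJoins` and `JoinAliases` fields from the `PreloadOption` diff further down; the join text, alias, and import path are illustrative.

```go
package main

import common "example.local/pkg/common" // placeholder import path

func main() {
	// Illustrative only: a preload that carries its own JOIN. The handler applies
	// each SqlJoins entry to the preload query, and JoinAliases ("d" here) is
	// preserved through filter processing so a condition such as d.name survives
	// column validation.
	_ = common.PreloadOption{
		Relation:    "Employees",
		TableName:   "employees",
		SqlJoins:    []string{"LEFT JOIN departments d ON d.id = employees.department_id"},
		JoinAliases: []string{"d"},
		Where:       "d.name = 'Engineering'",
	}
}
```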
Hein
defe27549b feat(sql): Improve base64 handling in SqlNull type
* Refactor base64 encoding and decoding checks for []byte types.
* Simplify type assertions using if statements instead of switch cases.
2026-01-27 17:35:13 +02:00
Hein
f7725340a6 feat(sql): Add base64 encoding/decoding for SqlByteArray
* Implement base64 handling in SqlNull for []byte types.
* Add tests for SqlString and SqlByteArray with base64 encoding.
* Ensure proper JSON marshaling and unmarshaling for new types.
2026-01-27 17:33:50 +02:00
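The SqlNull/SqlByteArray implementation itself is not part of this compare, so the following is only a generic, self-contained illustration of the technique these two commits describe: base64-encoding a []byte during JSON marshaling and decoding it back on unmarshal. The type name is hypothetical.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// byteArray illustrates the general pattern: hold raw bytes, emit base64 in JSON.
type byteArray struct{ Data []byte }

func (b byteArray) MarshalJSON() ([]byte, error) {
	return json.Marshal(base64.StdEncoding.EncodeToString(b.Data))
}

func (b *byteArray) UnmarshalJSON(in []byte) error {
	var s string
	if err := json.Unmarshal(in, &s); err != nil {
		return err
	}
	raw, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return err
	}
	b.Data = raw
	return nil
}

func main() {
	out, _ := json.Marshal(byteArray{Data: []byte("hello")})
	fmt.Println(string(out)) // "aGVsbG8="
}
```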
Hein
07016d1b73 feat(config): Update timeout settings for connections
* Set default query timeout to 2 minutes and enforce minimum.
* Add statement_timeout for PostgreSQL DSN.
* Implement busy timeout for SQLite with a minimum of 2 minutes.
* Enforce minimum connection timeouts of 10 minutes for server instance.
2026-01-26 11:06:16 +02:00
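A small sketch of what the resulting connection strings might look like with the new timeout handling; the parameter keys come from the config diff below (statement_timeout for PostgreSQL, _timeout for SQLite), while the host and file names are placeholders.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	queryTimeout := 2 * time.Minute // enforced minimum from this commit
	ms := queryTimeout.Milliseconds()

	// PostgreSQL: statement_timeout is appended to the DSN in milliseconds.
	fmt.Printf("host=localhost dbname=app statement_timeout=%d\n", ms)

	// SQLite: the driver's _timeout query parameter, also in milliseconds.
	fmt.Printf("app.db?_timeout=%d\n", ms)
}
```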
Hein
09f2256899 feat(sql): Enhance SQL clause handling with parentheses
* Add EnsureOuterParentheses function to wrap clauses in parentheses.
* Implement logic to preserve outer parentheses for OR conditions.
* Update SanitizeWhereClause to utilize new function for better query safety.
* Introduce tests for EnsureOuterParentheses and containsTopLevelOR functions.
* Refactor filter application in handler to group OR filters correctly.
2026-01-26 09:14:17 +02:00
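A short usage sketch of `EnsureOuterParentheses` as introduced here (its full implementation appears in the sql_utils diff further down); the clauses are examples and the import path is a placeholder.

```go
package main

import (
	"fmt"

	common "example.local/pkg/common" // placeholder import path
)

func main() {
	clause := "status = 'active' OR status = 'pending'"
	// Wrapping guarantees the OR cannot escape when the clause is later
	// combined with other AND conditions by the query builder.
	fmt.Println(common.EnsureOuterParentheses(clause))
	// (status = 'active' OR status = 'pending')

	// Already-wrapped input is returned unchanged.
	fmt.Println(common.EnsureOuterParentheses("(age > 18)")) // (age > 18)
}
```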
Hein
c12c045db1 feat(validation): Clear JoinAliases in FilterRequestOptions
* Implemented logic to clear JoinAliases after filtering.
* Added unit test to verify JoinAliases is nil post-filtering.
* Ensured other fields are correctly filtered.
2026-01-15 14:43:11 +02:00
Hein
24a7ef7284 feat(restheadspec): Add support for join aliases in filters and sorts
- Extract join aliases from custom SQL JOIN clauses.
- Validate join aliases for filtering and sorting operations.
- Update documentation to reflect new functionality.
- Enhance tests for alias extraction and usage.
2026-01-15 14:18:25 +02:00
23 changed files with 2807 additions and 325 deletions

.gitignore vendored
View File

@@ -26,3 +26,4 @@ go.work.sum
bin/ bin/
test.db test.db
/testserver /testserver
tests/data/

View File

@@ -202,23 +202,15 @@ func (b *BunAdapter) GetUnderlyingDB() interface{} {
// BunSelectQuery implements SelectQuery for Bun // BunSelectQuery implements SelectQuery for Bun
type BunSelectQuery struct { type BunSelectQuery struct {
query *bun.SelectQuery query *bun.SelectQuery
db bun.IDB // Store DB connection for count queries db bun.IDB // Store DB connection for count queries
hasModel bool // Track if Model() was called hasModel bool // Track if Model() was called
schema string // Separated schema name schema string // Separated schema name
tableName string // Just the table name, without schema tableName string // Just the table name, without schema
tableAlias string tableAlias string
deferredPreloads []deferredPreload // Preloads to execute as separate queries inJoinContext bool // Track if we're in a JOIN relation context
inJoinContext bool // Track if we're in a JOIN relation context joinTableAlias string // Alias to use for JOIN conditions
joinTableAlias string // Alias to use for JOIN conditions skipAutoDetect bool // Skip auto-detection to prevent circular calls
skipAutoDetect bool // Skip auto-detection to prevent circular calls
}
// deferredPreload represents a preload that will be executed as a separate query
// to avoid PostgreSQL identifier length limits
type deferredPreload struct {
relation string
apply []func(common.SelectQuery) common.SelectQuery
} }
func (b *BunSelectQuery) Model(model interface{}) common.SelectQuery { func (b *BunSelectQuery) Model(model interface{}) common.SelectQuery {
@@ -487,51 +479,8 @@ func (b *BunSelectQuery) Preload(relation string, conditions ...interface{}) com
return b return b
} }
// // shortenAliasForPostgres shortens a table/relation alias if it would exceed PostgreSQL's 63-char limit
// // when combined with typical column names
// func shortenAliasForPostgres(relationPath string) (string, bool) {
// // Convert relation path to the alias format Bun uses: dots become double underscores
// // Also convert to lowercase and use snake_case as Bun does
// parts := strings.Split(relationPath, ".")
// alias := strings.ToLower(strings.Join(parts, "__"))
// // PostgreSQL truncates identifiers to 63 chars
// // If the alias + typical column name would exceed this, we need to shorten
// // Reserve at least 30 chars for column names (e.g., "__rid_mastertype_hubtype")
// const maxAliasLength = 30
// if len(alias) > maxAliasLength {
// // Create a shortened alias using a hash of the original
// hash := md5.Sum([]byte(alias))
// hashStr := hex.EncodeToString(hash[:])[:8]
// // Keep first few chars of original for readability + hash
// prefixLen := maxAliasLength - 9 // 9 = 1 underscore + 8 hash chars
// if prefixLen > len(alias) {
// prefixLen = len(alias)
// }
// shortened := alias[:prefixLen] + "_" + hashStr
// logger.Debug("Shortened alias '%s' (%d chars) to '%s' (%d chars) to avoid PostgreSQL 63-char limit",
// alias, len(alias), shortened, len(shortened))
// return shortened, true
// }
// return alias, false
// }
// // estimateColumnAliasLength estimates the length of a column alias in a nested preload
// // Bun creates aliases like: relationChain__columnName
// func estimateColumnAliasLength(relationPath string, columnName string) int {
// relationParts := strings.Split(relationPath, ".")
// aliasChain := strings.ToLower(strings.Join(relationParts, "__"))
// // Bun adds "__" between alias and column name
// return len(aliasChain) + 2 + len(columnName)
// }
func (b *BunSelectQuery) PreloadRelation(relation string, apply ...func(common.SelectQuery) common.SelectQuery) common.SelectQuery { func (b *BunSelectQuery) PreloadRelation(relation string, apply ...func(common.SelectQuery) common.SelectQuery) common.SelectQuery {
// Auto-detect relationship type and choose optimal loading strategy // Auto-detect relationship type and choose optimal loading strategy
// Get the model from the query if available
// Skip auto-detection if flag is set (prevents circular calls from JoinRelation) // Skip auto-detection if flag is set (prevents circular calls from JoinRelation)
if !b.skipAutoDetect { if !b.skipAutoDetect {
model := b.query.GetModel() model := b.query.GetModel()
@@ -554,49 +503,7 @@ func (b *BunSelectQuery) PreloadRelation(relation string, apply ...func(common.S
} }
} }
// Check if this relation chain would create problematic long aliases // Use Bun's native Relation() for preloading
relationParts := strings.Split(relation, ".")
aliasChain := strings.ToLower(strings.Join(relationParts, "__"))
// PostgreSQL's identifier limit is 63 characters
const postgresIdentifierLimit = 63
const safeAliasLimit = 35 // Leave room for column names
// If the alias chain is too long, defer this preload to be executed as a separate query
if len(relationParts) > 1 && len(aliasChain) > safeAliasLimit {
logger.Info("Preload relation '%s' creates long alias chain '%s' (%d chars). "+
"Using separate query to avoid PostgreSQL %d-char identifier limit.",
relation, aliasChain, len(aliasChain), postgresIdentifierLimit)
// For nested preloads (e.g., "Parent.Child"), split into separate preloads
// This avoids the long concatenated alias
if len(relationParts) > 1 {
// Load first level normally: "Parent"
firstLevel := relationParts[0]
remainingPath := strings.Join(relationParts[1:], ".")
logger.Info("Splitting nested preload: loading '%s' first, then '%s' separately",
firstLevel, remainingPath)
// Apply the first level preload normally
b.query = b.query.Relation(firstLevel)
// Store the remaining nested preload to be executed after the main query
b.deferredPreloads = append(b.deferredPreloads, deferredPreload{
relation: relation,
apply: apply,
})
return b
}
// Single level but still too long - just warn and continue
logger.Warn("Single-level preload '%s' has a very long name (%d chars). "+
"Consider renaming the field to avoid potential issues.",
relation, len(aliasChain))
}
// Normal preload handling
b.query = b.query.Relation(relation, func(sq *bun.SelectQuery) *bun.SelectQuery { b.query = b.query.Relation(relation, func(sq *bun.SelectQuery) *bun.SelectQuery {
defer func() { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {
@@ -629,12 +536,7 @@ func (b *BunSelectQuery) PreloadRelation(relation string, apply ...func(common.S
// Extract table alias if model implements TableAliasProvider // Extract table alias if model implements TableAliasProvider
if provider, ok := modelValue.(common.TableAliasProvider); ok { if provider, ok := modelValue.(common.TableAliasProvider); ok {
wrapper.tableAlias = provider.TableAlias() wrapper.tableAlias = provider.TableAlias()
// Apply the alias to the Bun query so conditions can reference it logger.Debug("Preload relation '%s' using table alias: %s", relation, wrapper.tableAlias)
if wrapper.tableAlias != "" {
// Note: Bun's Relation() already sets up the table, but we can add
// the alias explicitly if needed
logger.Debug("Preload relation '%s' using table alias: %s", relation, wrapper.tableAlias)
}
} }
} }
@@ -644,7 +546,6 @@ func (b *BunSelectQuery) PreloadRelation(relation string, apply ...func(common.S
// Apply each function in sequence // Apply each function in sequence
for _, fn := range apply { for _, fn := range apply {
if fn != nil { if fn != nil {
// Pass &current (pointer to interface variable), fn modifies and returns new interface value
modified := fn(current) modified := fn(current)
current = modified current = modified
} }
@@ -734,7 +635,6 @@ func (b *BunSelectQuery) Scan(ctx context.Context, dest interface{}) (err error)
return fmt.Errorf("destination cannot be nil") return fmt.Errorf("destination cannot be nil")
} }
// Execute the main query first
err = b.query.Scan(ctx, dest) err = b.query.Scan(ctx, dest)
if err != nil { if err != nil {
// Log SQL string for debugging // Log SQL string for debugging
@@ -743,17 +643,6 @@ func (b *BunSelectQuery) Scan(ctx context.Context, dest interface{}) (err error)
return err return err
} }
// Execute any deferred preloads
if len(b.deferredPreloads) > 0 {
err = b.executeDeferredPreloads(ctx, dest)
if err != nil {
logger.Warn("Failed to execute deferred preloads: %v", err)
// Don't fail the whole query, just log the warning
}
// Clear deferred preloads to prevent re-execution
b.deferredPreloads = nil
}
return nil return nil
} }
@@ -803,7 +692,6 @@ func (b *BunSelectQuery) ScanModel(ctx context.Context) (err error) {
} }
} }
// Execute the main query first
err = b.query.Scan(ctx) err = b.query.Scan(ctx)
if err != nil { if err != nil {
// Log SQL string for debugging // Log SQL string for debugging
@@ -812,147 +700,9 @@ func (b *BunSelectQuery) ScanModel(ctx context.Context) (err error) {
return err return err
} }
// Execute any deferred preloads
if len(b.deferredPreloads) > 0 {
model := b.query.GetModel()
err = b.executeDeferredPreloads(ctx, model.Value())
if err != nil {
logger.Warn("Failed to execute deferred preloads: %v", err)
// Don't fail the whole query, just log the warning
}
// Clear deferred preloads to prevent re-execution
b.deferredPreloads = nil
}
return nil return nil
} }
// executeDeferredPreloads executes preloads that were deferred to avoid PostgreSQL identifier length limits
func (b *BunSelectQuery) executeDeferredPreloads(ctx context.Context, dest interface{}) error {
if len(b.deferredPreloads) == 0 {
return nil
}
for _, dp := range b.deferredPreloads {
err := b.executeSingleDeferredPreload(ctx, dest, dp)
if err != nil {
return fmt.Errorf("failed to execute deferred preload '%s': %w", dp.relation, err)
}
}
return nil
}
// executeSingleDeferredPreload executes a single deferred preload
// For a relation like "Parent.Child", it:
// 1. Finds all loaded Parent records in dest
// 2. Loads Child records for those Parents using a separate query (loading only "Child", not "Parent.Child")
// 3. Bun automatically assigns the Child records to the appropriate Parent.Child field
func (b *BunSelectQuery) executeSingleDeferredPreload(ctx context.Context, dest interface{}, dp deferredPreload) error {
relationParts := strings.Split(dp.relation, ".")
if len(relationParts) < 2 {
return fmt.Errorf("deferred preload must be nested (e.g., 'Parent.Child'), got: %s", dp.relation)
}
// The parent relation that was already loaded
parentRelation := relationParts[0]
// The child relation we need to load
childRelation := strings.Join(relationParts[1:], ".")
logger.Debug("Executing deferred preload: loading '%s' on already-loaded '%s'", childRelation, parentRelation)
// Use reflection to access the parent relation field(s) in the loaded records
// Then load the child relation for those parent records
destValue := reflect.ValueOf(dest)
if destValue.Kind() == reflect.Ptr {
destValue = destValue.Elem()
}
// Handle both slice and single record
if destValue.Kind() == reflect.Slice {
// Iterate through each record in the slice
for i := 0; i < destValue.Len(); i++ {
record := destValue.Index(i)
if err := b.loadChildRelationForRecord(ctx, record, parentRelation, childRelation, dp.apply); err != nil {
logger.Warn("Failed to load child relation '%s' for record %d: %v", childRelation, i, err)
// Continue with other records
}
}
} else {
// Single record
if err := b.loadChildRelationForRecord(ctx, destValue, parentRelation, childRelation, dp.apply); err != nil {
return fmt.Errorf("failed to load child relation '%s': %w", childRelation, err)
}
}
return nil
}
// loadChildRelationForRecord loads a child relation for a single parent record
func (b *BunSelectQuery) loadChildRelationForRecord(ctx context.Context, record reflect.Value, parentRelation, childRelation string, apply []func(common.SelectQuery) common.SelectQuery) error {
// Ensure we're working with the actual struct value, not a pointer
if record.Kind() == reflect.Ptr {
record = record.Elem()
}
// Get the parent relation field
parentField := record.FieldByName(parentRelation)
if !parentField.IsValid() {
// Parent relation field doesn't exist
logger.Debug("Parent relation field '%s' not found in record", parentRelation)
return nil
}
// Check if the parent field is nil (for pointer fields)
if parentField.Kind() == reflect.Ptr && parentField.IsNil() {
// Parent relation not loaded or nil, skip
logger.Debug("Parent relation field '%s' is nil, skipping child preload", parentRelation)
return nil
}
// Get a pointer to the parent field so Bun can modify it
// CRITICAL: We need to pass a pointer, not a value, so that when Bun
// loads the child records and appends them to the slice, the changes
// are reflected in the original struct field.
var parentPtr interface{}
if parentField.Kind() == reflect.Ptr {
// Field is already a pointer (e.g., Parent *Parent), use as-is
parentPtr = parentField.Interface()
} else {
// Field is a value (e.g., Comments []Comment), get its address
if parentField.CanAddr() {
parentPtr = parentField.Addr().Interface()
} else {
return fmt.Errorf("cannot get address of field '%s'", parentRelation)
}
}
// Load the child relation on the parent record
// This uses a shorter alias since we're only loading "Child", not "Parent.Child"
// CRITICAL: Use WherePK() to ensure we only load children for THIS specific parent
// record, not the first parent in the database table.
return b.db.NewSelect().
Model(parentPtr).
WherePK().
Relation(childRelation, func(sq *bun.SelectQuery) *bun.SelectQuery {
// Apply any custom query modifications
if len(apply) > 0 {
wrapper := &BunSelectQuery{query: sq, db: b.db}
current := common.SelectQuery(wrapper)
for _, fn := range apply {
if fn != nil {
current = fn(current)
}
}
if finalBun, ok := current.(*BunSelectQuery); ok {
return finalBun.query
}
}
return sq
}).
Scan(ctx)
}
func (b *BunSelectQuery) Count(ctx context.Context) (count int, err error) { func (b *BunSelectQuery) Count(ctx context.Context) (count int, err error) {
defer func() { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {

View File

@@ -130,6 +130,9 @@ func validateWhereClauseSecurity(where string) error {
// Note: This function will NOT add prefixes to unprefixed columns. It will only fix // Note: This function will NOT add prefixes to unprefixed columns. It will only fix
// incorrect prefixes (e.g., wrong_table.column -> correct_table.column), unless the // incorrect prefixes (e.g., wrong_table.column -> correct_table.column), unless the
// prefix matches a preloaded relation name, in which case it's left unchanged. // prefix matches a preloaded relation name, in which case it's left unchanged.
//
// IMPORTANT: Outer parentheses are preserved if the clause contains top-level OR operators
// to prevent OR logic from escaping and affecting the entire query incorrectly.
func SanitizeWhereClause(where string, tableName string, options ...*RequestOptions) string { func SanitizeWhereClause(where string, tableName string, options ...*RequestOptions) string {
if where == "" { if where == "" {
return "" return ""
@@ -143,8 +146,19 @@ func SanitizeWhereClause(where string, tableName string, options ...*RequestOpti
return "" return ""
} }
// Strip outer parentheses and re-trim // Check if the original clause has outer parentheses and contains OR operators
where = stripOuterParentheses(where) // If so, we need to preserve the outer parentheses to prevent OR logic from escaping
hasOuterParens := false
if len(where) > 0 && where[0] == '(' && where[len(where)-1] == ')' {
_, hasOuterParens = stripOneMatchingOuterParen(where)
}
// Strip outer parentheses and re-trim for processing
whereWithoutParens := stripOuterParentheses(where)
shouldPreserveParens := hasOuterParens && containsTopLevelOR(whereWithoutParens)
// Use the stripped version for processing
where = whereWithoutParens
// Get valid columns from the model if tableName is provided // Get valid columns from the model if tableName is provided
var validColumns map[string]bool var validColumns map[string]bool
@@ -166,6 +180,14 @@ func SanitizeWhereClause(where string, tableName string, options ...*RequestOpti
logger.Debug("Added preload relation '%s' as allowed table prefix", options[0].Preload[pi].Relation) logger.Debug("Added preload relation '%s' as allowed table prefix", options[0].Preload[pi].Relation)
} }
} }
// Add join aliases as allowed prefixes
for _, alias := range options[0].JoinAliases {
if alias != "" {
allowedPrefixes[alias] = true
logger.Debug("Added join alias '%s' as allowed table prefix", alias)
}
}
} }
// Split by AND to handle multiple conditions // Split by AND to handle multiple conditions
@@ -221,7 +243,14 @@ func SanitizeWhereClause(where string, tableName string, options ...*RequestOpti
result := strings.Join(validConditions, " AND ") result := strings.Join(validConditions, " AND ")
if result != where { // If the original clause had outer parentheses and contains OR operators,
// restore the outer parentheses to prevent OR logic from escaping
if shouldPreserveParens {
result = "(" + result + ")"
logger.Debug("Preserved outer parentheses for OR conditions: '%s'", result)
}
if result != where && !shouldPreserveParens {
logger.Debug("Sanitized WHERE clause: '%s' -> '%s'", where, result) logger.Debug("Sanitized WHERE clause: '%s' -> '%s'", where, result)
} }
@@ -282,6 +311,93 @@ func stripOneMatchingOuterParen(s string) (string, bool) {
return strings.TrimSpace(s[1 : len(s)-1]), true return strings.TrimSpace(s[1 : len(s)-1]), true
} }
// EnsureOuterParentheses ensures that a SQL clause is wrapped in parentheses
// to prevent OR logic from escaping. It checks if the clause already has
// matching outer parentheses and only adds them if they don't exist.
//
// This is particularly important for OR conditions and complex filters where
// the absence of parentheses could cause the logic to escape and affect
// the entire query incorrectly.
//
// Parameters:
// - clause: The SQL clause to check and potentially wrap
//
// Returns:
// - The clause with guaranteed outer parentheses, or empty string if input is empty
func EnsureOuterParentheses(clause string) string {
if clause == "" {
return ""
}
clause = strings.TrimSpace(clause)
if clause == "" {
return ""
}
// Check if the clause already has matching outer parentheses
_, hasOuterParens := stripOneMatchingOuterParen(clause)
// If it already has matching outer parentheses, return as-is
if hasOuterParens {
return clause
}
// Otherwise, wrap it in parentheses
return "(" + clause + ")"
}
// containsTopLevelOR checks if a SQL clause contains OR operators at the top level
// (i.e., not inside parentheses or subqueries). This is used to determine if
// outer parentheses should be preserved to prevent OR logic from escaping.
func containsTopLevelOR(clause string) bool {
if clause == "" {
return false
}
depth := 0
inSingleQuote := false
inDoubleQuote := false
lowerClause := strings.ToLower(clause)
for i := 0; i < len(clause); i++ {
ch := clause[i]
// Track quote state
if ch == '\'' && !inDoubleQuote {
inSingleQuote = !inSingleQuote
continue
}
if ch == '"' && !inSingleQuote {
inDoubleQuote = !inDoubleQuote
continue
}
// Skip if inside quotes
if inSingleQuote || inDoubleQuote {
continue
}
// Track parenthesis depth
switch ch {
case '(':
depth++
case ')':
depth--
}
// Only check for OR at depth 0 (not inside parentheses)
if depth == 0 && i+4 <= len(clause) {
// Check for " OR " (case-insensitive)
substring := lowerClause[i : i+4]
if substring == " or " {
return true
}
}
}
return false
}
// splitByAND splits a WHERE clause by AND operators (case-insensitive) // splitByAND splits a WHERE clause by AND operators (case-insensitive)
// This is parenthesis-aware and won't split on AND operators inside subqueries // This is parenthesis-aware and won't split on AND operators inside subqueries
func splitByAND(where string) []string { func splitByAND(where string) []string {

View File

@@ -0,0 +1,103 @@
package common
import (
"testing"
)
// TestSanitizeWhereClause_WithTableName tests that table prefixes in WHERE clauses
// are correctly handled when the tableName parameter matches the prefix
func TestSanitizeWhereClause_WithTableName(t *testing.T) {
tests := []struct {
name string
where string
tableName string
options *RequestOptions
expected string
}{
{
name: "Correct table prefix should not be changed",
where: "mastertaskitem.rid_parentmastertaskitem is null",
tableName: "mastertaskitem",
options: nil,
expected: "mastertaskitem.rid_parentmastertaskitem is null",
},
{
name: "Wrong table prefix should be fixed",
where: "wrong_table.rid_parentmastertaskitem is null",
tableName: "mastertaskitem",
options: nil,
expected: "mastertaskitem.rid_parentmastertaskitem is null",
},
{
name: "Relation name should not replace correct table prefix",
where: "mastertaskitem.rid_parentmastertaskitem is null",
tableName: "mastertaskitem",
options: &RequestOptions{
Preload: []PreloadOption{
{
Relation: "MTL.MAL.MAL_RID_PARENTMASTERTASKITEM",
TableName: "mastertaskitem",
},
},
},
expected: "mastertaskitem.rid_parentmastertaskitem is null",
},
{
name: "Unqualified column should remain unqualified",
where: "rid_parentmastertaskitem is null",
tableName: "mastertaskitem",
options: nil,
expected: "rid_parentmastertaskitem is null",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := SanitizeWhereClause(tt.where, tt.tableName, tt.options)
if result != tt.expected {
t.Errorf("SanitizeWhereClause(%q, %q) = %q, want %q",
tt.where, tt.tableName, result, tt.expected)
}
})
}
}
// TestAddTablePrefixToColumns_WithTableName tests that table prefixes
// are correctly added to unqualified columns
func TestAddTablePrefixToColumns_WithTableName(t *testing.T) {
tests := []struct {
name string
where string
tableName string
expected string
}{
{
name: "Add prefix to unqualified column",
where: "rid_parentmastertaskitem is null",
tableName: "mastertaskitem",
expected: "mastertaskitem.rid_parentmastertaskitem is null",
},
{
name: "Don't change already qualified column",
where: "mastertaskitem.rid_parentmastertaskitem is null",
tableName: "mastertaskitem",
expected: "mastertaskitem.rid_parentmastertaskitem is null",
},
{
name: "Don't change qualified column with different table",
where: "other_table.rid_something is null",
tableName: "mastertaskitem",
expected: "other_table.rid_something is null",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := AddTablePrefixToColumns(tt.where, tt.tableName)
if result != tt.expected {
t.Errorf("AddTablePrefixToColumns(%q, %q) = %q, want %q",
tt.where, tt.tableName, result, tt.expected)
}
})
}
}

View File

@@ -659,6 +659,179 @@ func TestSanitizeWhereClauseWithModel(t *testing.T) {
} }
} }
func TestEnsureOuterParentheses(t *testing.T) {
tests := []struct {
name string
input string
expected string
}{
{
name: "no parentheses",
input: "status = 'active'",
expected: "(status = 'active')",
},
{
name: "already has outer parentheses",
input: "(status = 'active')",
expected: "(status = 'active')",
},
{
name: "OR condition without parentheses",
input: "status = 'active' OR status = 'pending'",
expected: "(status = 'active' OR status = 'pending')",
},
{
name: "OR condition with parentheses",
input: "(status = 'active' OR status = 'pending')",
expected: "(status = 'active' OR status = 'pending')",
},
{
name: "complex condition with nested parentheses",
input: "(status = 'active' OR status = 'pending') AND (age > 18)",
expected: "((status = 'active' OR status = 'pending') AND (age > 18))",
},
{
name: "empty string",
input: "",
expected: "",
},
{
name: "whitespace only",
input: " ",
expected: "",
},
{
name: "mismatched parentheses - adds outer ones",
input: "(status = 'active' OR status = 'pending'",
expected: "((status = 'active' OR status = 'pending')",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := EnsureOuterParentheses(tt.input)
if result != tt.expected {
t.Errorf("EnsureOuterParentheses(%q) = %q; want %q", tt.input, result, tt.expected)
}
})
}
}
func TestContainsTopLevelOR(t *testing.T) {
tests := []struct {
name string
input string
expected bool
}{
{
name: "no OR operator",
input: "status = 'active' AND age > 18",
expected: false,
},
{
name: "top-level OR",
input: "status = 'active' OR status = 'pending'",
expected: true,
},
{
name: "OR inside parentheses",
input: "age > 18 AND (status = 'active' OR status = 'pending')",
expected: false,
},
{
name: "OR in subquery",
input: "id IN (SELECT id FROM users WHERE status = 'active' OR status = 'pending')",
expected: false,
},
{
name: "OR inside quotes",
input: "comment = 'this OR that'",
expected: false,
},
{
name: "mixed - top-level OR and nested OR",
input: "name = 'test' OR (status = 'active' OR status = 'pending')",
expected: true,
},
{
name: "empty string",
input: "",
expected: false,
},
{
name: "lowercase or",
input: "status = 'active' or status = 'pending'",
expected: true,
},
{
name: "uppercase OR",
input: "status = 'active' OR status = 'pending'",
expected: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := containsTopLevelOR(tt.input)
if result != tt.expected {
t.Errorf("containsTopLevelOR(%q) = %v; want %v", tt.input, result, tt.expected)
}
})
}
}
func TestSanitizeWhereClause_PreservesParenthesesWithOR(t *testing.T) {
tests := []struct {
name string
where string
tableName string
expected string
}{
{
name: "OR condition with outer parentheses - preserved",
where: "(status = 'active' OR status = 'pending')",
tableName: "users",
expected: "(users.status = 'active' OR users.status = 'pending')",
},
{
name: "AND condition with outer parentheses - stripped (no OR)",
where: "(status = 'active' AND age > 18)",
tableName: "users",
expected: "users.status = 'active' AND users.age > 18",
},
{
name: "complex OR with nested conditions",
where: "((status = 'active' OR status = 'pending') AND age > 18)",
tableName: "users",
// Outer parens are stripped, but inner parens with OR are preserved
expected: "(users.status = 'active' OR users.status = 'pending') AND users.age > 18",
},
{
name: "OR without outer parentheses - no parentheses added by SanitizeWhereClause",
where: "status = 'active' OR status = 'pending'",
tableName: "users",
expected: "users.status = 'active' OR users.status = 'pending'",
},
{
name: "simple OR with parentheses - preserved",
where: "(users.status = 'active' OR users.status = 'pending')",
tableName: "users",
// Already has correct prefixes, parentheses preserved
expected: "(users.status = 'active' OR users.status = 'pending')",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
prefixedWhere := AddTablePrefixToColumns(tt.where, tt.tableName)
result := SanitizeWhereClause(prefixedWhere, tt.tableName)
if result != tt.expected {
t.Errorf("SanitizeWhereClause(%q, %q) = %q; want %q", tt.where, tt.tableName, result, tt.expected)
}
})
}
}
func TestAddTablePrefixToColumns_ComplexConditions(t *testing.T) { func TestAddTablePrefixToColumns_ComplexConditions(t *testing.T) {
tests := []struct { tests := []struct {
name string name string

View File

@@ -23,6 +23,10 @@ type RequestOptions struct {
CursorForward string `json:"cursor_forward"` CursorForward string `json:"cursor_forward"`
CursorBackward string `json:"cursor_backward"` CursorBackward string `json:"cursor_backward"`
FetchRowNumber *string `json:"fetch_row_number"` FetchRowNumber *string `json:"fetch_row_number"`
// Join table aliases (used for validation of prefixed columns in filters/sorts)
// Not serialized to JSON as it's internal validation state
JoinAliases []string `json:"-"`
} }
type Parameter struct { type Parameter struct {
@@ -33,6 +37,7 @@ type Parameter struct {
type PreloadOption struct { type PreloadOption struct {
Relation string `json:"relation"` Relation string `json:"relation"`
TableName string `json:"table_name"` // Actual database table name (e.g., "mastertaskitem")
Columns []string `json:"columns"` Columns []string `json:"columns"`
OmitColumns []string `json:"omit_columns"` OmitColumns []string `json:"omit_columns"`
Sort []SortOption `json:"sort"` Sort []SortOption `json:"sort"`
@@ -45,9 +50,14 @@ type PreloadOption struct {
Recursive bool `json:"recursive"` // if true, preload recursively up to 5 levels Recursive bool `json:"recursive"` // if true, preload recursively up to 5 levels
// Relationship keys from XFiles - used to build proper foreign key filters // Relationship keys from XFiles - used to build proper foreign key filters
PrimaryKey string `json:"primary_key"` // Primary key of the related table PrimaryKey string `json:"primary_key"` // Primary key of the related table
RelatedKey string `json:"related_key"` // For child tables: column in child that references parent RelatedKey string `json:"related_key"` // For child tables: column in child that references parent
ForeignKey string `json:"foreign_key"` // For parent tables: column in current table that references parent ForeignKey string `json:"foreign_key"` // For parent tables: column in current table that references parent
RecursiveChildKey string `json:"recursive_child_key"` // For recursive tables: FK column used for recursion (e.g., "rid_parentmastertaskitem")
// Custom SQL JOINs from XFiles - used when preload needs additional joins
SqlJoins []string `json:"sql_joins"` // Custom SQL JOIN clauses
JoinAliases []string `json:"join_aliases"` // Extracted table aliases from SqlJoins for validation
} }
type FilterOption struct { type FilterOption struct {

View File

@@ -237,15 +237,29 @@ func (v *ColumnValidator) FilterRequestOptions(options RequestOptions) RequestOp
for _, sort := range options.Sort { for _, sort := range options.Sort {
if v.IsValidColumn(sort.Column) { if v.IsValidColumn(sort.Column) {
validSorts = append(validSorts, sort) validSorts = append(validSorts, sort)
} else if strings.HasPrefix(sort.Column, "(") && strings.HasSuffix(sort.Column, ")") {
// Allow sort by expression/subquery, but validate for security
if IsSafeSortExpression(sort.Column) {
validSorts = append(validSorts, sort)
} else {
logger.Warn("Unsafe sort expression '%s' removed", sort.Column)
}
} else { } else {
logger.Warn("Invalid column in sort '%s' removed", sort.Column) foundJoin := false
for _, j := range options.JoinAliases {
if strings.Contains(sort.Column, j) {
foundJoin = true
break
}
}
if foundJoin {
validSorts = append(validSorts, sort)
continue
}
if strings.HasPrefix(sort.Column, "(") && strings.HasSuffix(sort.Column, ")") {
// Allow sort by expression/subquery, but validate for security
if IsSafeSortExpression(sort.Column) {
validSorts = append(validSorts, sort)
} else {
logger.Warn("Unsafe sort expression '%s' removed", sort.Column)
}
} else {
logger.Warn("Invalid column in sort '%s' removed", sort.Column)
}
} }
} }
filtered.Sort = validSorts filtered.Sort = validSorts
@@ -258,13 +272,29 @@ func (v *ColumnValidator) FilterRequestOptions(options RequestOptions) RequestOp
filteredPreload.Columns = v.FilterValidColumns(preload.Columns) filteredPreload.Columns = v.FilterValidColumns(preload.Columns)
filteredPreload.OmitColumns = v.FilterValidColumns(preload.OmitColumns) filteredPreload.OmitColumns = v.FilterValidColumns(preload.OmitColumns)
// Preserve SqlJoins and JoinAliases for preloads with custom joins
filteredPreload.SqlJoins = preload.SqlJoins
filteredPreload.JoinAliases = preload.JoinAliases
// Filter preload filters // Filter preload filters
validPreloadFilters := make([]FilterOption, 0, len(preload.Filters)) validPreloadFilters := make([]FilterOption, 0, len(preload.Filters))
for _, filter := range preload.Filters { for _, filter := range preload.Filters {
if v.IsValidColumn(filter.Column) { if v.IsValidColumn(filter.Column) {
validPreloadFilters = append(validPreloadFilters, filter) validPreloadFilters = append(validPreloadFilters, filter)
} else { } else {
logger.Warn("Invalid column in preload '%s' filter '%s' removed", preload.Relation, filter.Column) // Check if the filter column references a joined table alias
foundJoin := false
for _, alias := range preload.JoinAliases {
if strings.Contains(filter.Column, alias) {
foundJoin = true
break
}
}
if foundJoin {
validPreloadFilters = append(validPreloadFilters, filter)
} else {
logger.Warn("Invalid column in preload '%s' filter '%s' removed", preload.Relation, filter.Column)
}
} }
} }
filteredPreload.Filters = validPreloadFilters filteredPreload.Filters = validPreloadFilters
@@ -291,6 +321,9 @@ func (v *ColumnValidator) FilterRequestOptions(options RequestOptions) RequestOp
} }
filtered.Preload = validPreloads filtered.Preload = validPreloads
// Clear JoinAliases - this is an internal validation field and should not be persisted
filtered.JoinAliases = nil
return filtered return filtered
} }

View File

@@ -362,6 +362,29 @@ func TestFilterRequestOptions(t *testing.T) {
} }
} }
func TestFilterRequestOptions_ClearsJoinAliases(t *testing.T) {
model := TestModel{}
validator := NewColumnValidator(model)
options := RequestOptions{
Columns: []string{"id", "name"},
// Set JoinAliases - this should be cleared by FilterRequestOptions
JoinAliases: []string{"d", "u", "r"},
}
filtered := validator.FilterRequestOptions(options)
// Verify that JoinAliases was cleared (internal field should not persist)
if filtered.JoinAliases != nil {
t.Errorf("Expected JoinAliases to be nil after filtering, got %v", filtered.JoinAliases)
}
// Verify that other fields are still properly filtered
if len(filtered.Columns) != 2 {
t.Errorf("Expected 2 columns, got %d", len(filtered.Columns))
}
}
func TestIsSafeSortExpression(t *testing.T) { func TestIsSafeSortExpression(t *testing.T) {
tests := []struct { tests := []struct {
name string name string

View File

@@ -221,7 +221,10 @@ func (cc *ConnectionConfig) ApplyDefaults(global *ManagerConfig) {
cc.ConnectTimeout = 10 * time.Second cc.ConnectTimeout = 10 * time.Second
} }
if cc.QueryTimeout == 0 { if cc.QueryTimeout == 0 {
cc.QueryTimeout = 30 * time.Second cc.QueryTimeout = 2 * time.Minute // Default to 2 minutes
} else if cc.QueryTimeout < 2*time.Minute {
// Enforce minimum of 2 minutes
cc.QueryTimeout = 2 * time.Minute
} }
// Default ORM // Default ORM
@@ -325,14 +328,29 @@ func (cc *ConnectionConfig) buildPostgresDSN() string {
dsn += fmt.Sprintf(" search_path=%s", cc.Schema) dsn += fmt.Sprintf(" search_path=%s", cc.Schema)
} }
// Add statement_timeout for query execution timeout (in milliseconds)
if cc.QueryTimeout > 0 {
timeoutMs := int(cc.QueryTimeout.Milliseconds())
dsn += fmt.Sprintf(" statement_timeout=%d", timeoutMs)
}
return dsn return dsn
} }
func (cc *ConnectionConfig) buildSQLiteDSN() string { func (cc *ConnectionConfig) buildSQLiteDSN() string {
if cc.FilePath != "" { filepath := cc.FilePath
return cc.FilePath if filepath == "" {
filepath = ":memory:"
} }
return ":memory:"
// Add query parameters for timeouts
// Note: SQLite driver supports _timeout parameter (in milliseconds)
if cc.QueryTimeout > 0 {
timeoutMs := int(cc.QueryTimeout.Milliseconds())
filepath += fmt.Sprintf("?_timeout=%d", timeoutMs)
}
return filepath
} }
func (cc *ConnectionConfig) buildMSSQLDSN() string { func (cc *ConnectionConfig) buildMSSQLDSN() string {
@@ -344,6 +362,24 @@ func (cc *ConnectionConfig) buildMSSQLDSN() string {
dsn += fmt.Sprintf("&schema=%s", cc.Schema) dsn += fmt.Sprintf("&schema=%s", cc.Schema)
} }
// Add connection timeout (in seconds)
if cc.ConnectTimeout > 0 {
timeoutSec := int(cc.ConnectTimeout.Seconds())
dsn += fmt.Sprintf("&connection timeout=%d", timeoutSec)
}
// Add dial timeout for TCP connection (in seconds)
if cc.ConnectTimeout > 0 {
dialTimeoutSec := int(cc.ConnectTimeout.Seconds())
dsn += fmt.Sprintf("&dial timeout=%d", dialTimeoutSec)
}
// Add read timeout (in seconds) - enforces timeout for reading data
if cc.QueryTimeout > 0 {
readTimeoutSec := int(cc.QueryTimeout.Seconds())
dsn += fmt.Sprintf("&read timeout=%d", readTimeoutSec)
}
return dsn return dsn
} }

View File

@@ -76,8 +76,12 @@ func (p *SQLiteProvider) Connect(ctx context.Context, cfg ConnectionConfig) erro
// Don't fail connection if WAL mode cannot be enabled // Don't fail connection if WAL mode cannot be enabled
} }
// Set busy timeout to handle locked database // Set busy timeout to handle locked database (minimum 2 minutes = 120000ms)
_, err = db.ExecContext(ctx, "PRAGMA busy_timeout=5000") busyTimeout := cfg.GetQueryTimeout().Milliseconds()
if busyTimeout < 120000 {
busyTimeout = 120000 // Enforce minimum of 2 minutes
}
_, err = db.ExecContext(ctx, fmt.Sprintf("PRAGMA busy_timeout=%d", busyTimeout))
if err != nil { if err != nil {
if cfg.GetEnableLogging() { if cfg.GetEnableLogging() {
logger.Warn("Failed to set busy timeout for SQLite", "error", err) logger.Warn("Failed to set busy timeout for SQLite", "error", err)

View File

@@ -318,6 +318,8 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
if cursorFilter != "" { if cursorFilter != "" {
logger.Debug("Applying cursor filter: %s", cursorFilter) logger.Debug("Applying cursor filter: %s", cursorFilter)
sanitizedCursor := common.SanitizeWhereClause(cursorFilter, reflection.ExtractTableNameOnly(tableName), &options) sanitizedCursor := common.SanitizeWhereClause(cursorFilter, reflection.ExtractTableNameOnly(tableName), &options)
// Ensure outer parentheses to prevent OR logic from escaping
sanitizedCursor = common.EnsureOuterParentheses(sanitizedCursor)
if sanitizedCursor != "" { if sanitizedCursor != "" {
query = query.Where(sanitizedCursor) query = query.Where(sanitizedCursor)
} }
@@ -1656,6 +1658,8 @@ func (h *Handler) applyPreloads(model interface{}, query common.SelectQuery, pre
// Build RequestOptions with all preloads to allow references to sibling relations // Build RequestOptions with all preloads to allow references to sibling relations
preloadOpts := &common.RequestOptions{Preload: preloads} preloadOpts := &common.RequestOptions{Preload: preloads}
sanitizedWhere := common.SanitizeWhereClause(preload.Where, reflection.ExtractTableNameOnly(preload.Relation), preloadOpts) sanitizedWhere := common.SanitizeWhereClause(preload.Where, reflection.ExtractTableNameOnly(preload.Relation), preloadOpts)
// Ensure outer parentheses to prevent OR logic from escaping
sanitizedWhere = common.EnsureOuterParentheses(sanitizedWhere)
if len(sanitizedWhere) > 0 { if len(sanitizedWhere) > 0 {
sq = sq.Where(sanitizedWhere) sq = sq.Where(sanitizedWhere)
} }

View File

@@ -233,6 +233,27 @@ x-custom-sql-join: LEFT JOIN departments d ON d.id = e.dept_id | INNER JOIN role
- Multiple JOINs can be specified using the pipe `|` separator - Multiple JOINs can be specified using the pipe `|` separator
- JOINs are sanitized for security - JOINs are sanitized for security
- Can be specified via headers or query parameters - Can be specified via headers or query parameters
- **Table aliases are automatically extracted and allowed for filtering and sorting**
**Using Join Aliases in Filters and Sorts:**
When you specify a custom SQL join with an alias, you can use that alias in your filter and sort parameters:
```
# Join with alias
x-custom-sql-join: LEFT JOIN departments d ON d.id = employees.department_id
# Sort by joined table column
x-sort: d.name,employees.id
# Filter by joined table column
x-searchop-eq-d.name: Engineering
```
The system automatically:
1. Extracts the alias from the JOIN clause (e.g., `d` from `departments d`)
2. Validates that prefixed columns (like `d.name`) refer to valid join aliases
3. Allows these prefixed columns in filters and sorts
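For illustration, a rough sketch of the alias-extraction idea described above; this is a hypothetical stand-in, not the repository's actual extraction function, and the regular expression is a simplification.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// extractJoinAliases pulls the table alias out of each pipe-separated JOIN
// clause ("departments d" -> "d"). Illustrative only.
func extractJoinAliases(joins string) []string {
	aliases := []string{}
	re := regexp.MustCompile(`(?i)join\s+\S+\s+(?:as\s+)?(\w+)\s+on`)
	for _, clause := range strings.Split(joins, "|") {
		if m := re.FindStringSubmatch(clause); m != nil {
			aliases = append(aliases, m[1])
		}
	}
	return aliases
}

func main() {
	joins := "LEFT JOIN departments d ON d.id = e.dept_id | INNER JOIN roles r ON r.id = e.role_id"
	fmt.Println(extractJoinAliases(joins)) // [d r]
}
```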
--- ---

View File

@@ -435,9 +435,11 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
} }
// Apply preloading // Apply preloading
logger.Debug("Total preloads to apply: %d", len(options.Preload))
for idx := range options.Preload { for idx := range options.Preload {
preload := options.Preload[idx] preload := options.Preload[idx]
logger.Debug("Applying preload: %s", preload.Relation) logger.Debug("Applying preload [%d]: Relation=%s, Recursive=%v, RelatedKey=%s, Where=%s",
idx, preload.Relation, preload.Recursive, preload.RelatedKey, preload.Where)
// Validate and fix WHERE clause to ensure it contains the relation prefix // Validate and fix WHERE clause to ensure it contains the relation prefix
if len(preload.Where) > 0 { if len(preload.Where) > 0 {
@@ -463,7 +465,8 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
} }
// Apply filters - validate and adjust for column types first // Apply filters - validate and adjust for column types first
for i := range options.Filters { // Group consecutive OR filters together to prevent OR logic from escaping
for i := 0; i < len(options.Filters); {
filter := &options.Filters[i] filter := &options.Filters[i]
// Validate and adjust filter based on column type // Validate and adjust filter based on column type
@@ -475,8 +478,39 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
logicOp = "AND" logicOp = "AND"
} }
logger.Debug("Applying filter: %s %s %v (needsCast=%v, logic=%s)", filter.Column, filter.Operator, filter.Value, castInfo.NeedsCast, logicOp) // Check if this is the start of an OR group
query = h.applyFilter(query, *filter, tableName, castInfo.NeedsCast, logicOp) if logicOp == "OR" {
// Collect all consecutive OR filters
orFilters := []*common.FilterOption{filter}
orCastInfo := []ColumnCastInfo{castInfo}
j := i + 1
for j < len(options.Filters) {
nextFilter := &options.Filters[j]
nextLogicOp := nextFilter.LogicOperator
if nextLogicOp == "" {
nextLogicOp = "AND"
}
if nextLogicOp == "OR" {
nextCastInfo := h.ValidateAndAdjustFilterForColumnType(nextFilter, model)
orFilters = append(orFilters, nextFilter)
orCastInfo = append(orCastInfo, nextCastInfo)
j++
} else {
break
}
}
// Apply the OR group as a single grouped condition
logger.Debug("Applying OR filter group with %d conditions", len(orFilters))
query = h.applyOrFilterGroup(query, orFilters, orCastInfo, tableName)
i = j
} else {
// Single AND filter - apply normally
logger.Debug("Applying filter: %s %s %v (needsCast=%v, logic=%s)", filter.Column, filter.Operator, filter.Value, castInfo.NeedsCast, logicOp)
query = h.applyFilter(query, *filter, tableName, castInfo.NeedsCast, logicOp)
i++
}
} }
// Apply custom SQL WHERE clause (AND condition) // Apply custom SQL WHERE clause (AND condition)
@@ -486,6 +520,8 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
prefixedWhere := common.AddTablePrefixToColumns(options.CustomSQLWhere, reflection.ExtractTableNameOnly(tableName)) prefixedWhere := common.AddTablePrefixToColumns(options.CustomSQLWhere, reflection.ExtractTableNameOnly(tableName))
// Then sanitize and allow preload table prefixes since custom SQL may reference multiple tables // Then sanitize and allow preload table prefixes since custom SQL may reference multiple tables
sanitizedWhere := common.SanitizeWhereClause(prefixedWhere, reflection.ExtractTableNameOnly(tableName), &options.RequestOptions) sanitizedWhere := common.SanitizeWhereClause(prefixedWhere, reflection.ExtractTableNameOnly(tableName), &options.RequestOptions)
// Ensure outer parentheses to prevent OR logic from escaping
sanitizedWhere = common.EnsureOuterParentheses(sanitizedWhere)
if sanitizedWhere != "" { if sanitizedWhere != "" {
query = query.Where(sanitizedWhere) query = query.Where(sanitizedWhere)
} }
@@ -497,6 +533,8 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
customOr := common.AddTablePrefixToColumns(options.CustomSQLOr, reflection.ExtractTableNameOnly(tableName)) customOr := common.AddTablePrefixToColumns(options.CustomSQLOr, reflection.ExtractTableNameOnly(tableName))
// Sanitize and allow preload table prefixes since custom SQL may reference multiple tables // Sanitize and allow preload table prefixes since custom SQL may reference multiple tables
sanitizedOr := common.SanitizeWhereClause(customOr, reflection.ExtractTableNameOnly(tableName), &options.RequestOptions) sanitizedOr := common.SanitizeWhereClause(customOr, reflection.ExtractTableNameOnly(tableName), &options.RequestOptions)
// Ensure outer parentheses to prevent OR logic from escaping
sanitizedOr = common.EnsureOuterParentheses(sanitizedOr)
if sanitizedOr != "" { if sanitizedOr != "" {
query = query.WhereOr(sanitizedOr) query = query.WhereOr(sanitizedOr)
} }
@@ -846,6 +884,15 @@ func (h *Handler) applyPreloadWithRecursion(query common.SelectQuery, preload co
} }
} }
// Apply custom SQL joins from XFiles
if len(preload.SqlJoins) > 0 {
logger.Debug("Applying %d SQL joins to preload %s", len(preload.SqlJoins), preload.Relation)
for _, joinClause := range preload.SqlJoins {
sq = sq.Join(joinClause)
logger.Debug("Applied SQL join to preload %s: %s", preload.Relation, joinClause)
}
}
// Apply filters // Apply filters
if len(preload.Filters) > 0 { if len(preload.Filters) > 0 {
for _, filter := range preload.Filters { for _, filter := range preload.Filters {
@@ -871,10 +918,25 @@ func (h *Handler) applyPreloadWithRecursion(query common.SelectQuery, preload co
if len(preload.Where) > 0 { if len(preload.Where) > 0 {
// Build RequestOptions with all preloads to allow references to sibling relations // Build RequestOptions with all preloads to allow references to sibling relations
preloadOpts := &common.RequestOptions{Preload: allPreloads} preloadOpts := &common.RequestOptions{Preload: allPreloads}
// First add table prefixes to unqualified columns
prefixedWhere := common.AddTablePrefixToColumns(preload.Where, reflection.ExtractTableNameOnly(preload.Relation)) // Determine the table name to use for WHERE clause processing
// Then sanitize and allow preload table prefixes // Prefer the explicit TableName field (set by XFiles), otherwise extract from relation name
sanitizedWhere := common.SanitizeWhereClause(prefixedWhere, reflection.ExtractTableNameOnly(preload.Relation), preloadOpts) tableName := preload.TableName
if tableName == "" {
tableName = reflection.ExtractTableNameOnly(preload.Relation)
}
// In Bun's Relation context, table prefixes are only needed when there are JOINs
// Without JOINs, Bun already knows which table is being queried
whereClause := preload.Where
if len(preload.SqlJoins) > 0 {
// Has JOINs: add table prefixes to disambiguate columns
whereClause = common.AddTablePrefixToColumns(preload.Where, tableName)
logger.Debug("Added table prefix for preload with joins: '%s' -> '%s'", preload.Where, whereClause)
}
// Sanitize the WHERE clause and allow preload table prefixes
sanitizedWhere := common.SanitizeWhereClause(whereClause, tableName, preloadOpts)
if len(sanitizedWhere) > 0 { if len(sanitizedWhere) > 0 {
sq = sq.Where(sanitizedWhere) sq = sq.Where(sanitizedWhere)
} }
@@ -893,21 +955,82 @@ func (h *Handler) applyPreloadWithRecursion(query common.SelectQuery, preload co
}) })
// Handle recursive preloading // Handle recursive preloading
if preload.Recursive && depth < 4 { if preload.Recursive && depth < 8 {
logger.Debug("Applying recursive preload for %s at depth %d", preload.Relation, depth+1) logger.Debug("Applying recursive preload for %s at depth %d", preload.Relation, depth+1)
// For recursive relationships, we need to get the last part of the relation path
// e.g., "MastertaskItems" -> "MastertaskItems.MastertaskItems"
relationParts := strings.Split(preload.Relation, ".") relationParts := strings.Split(preload.Relation, ".")
lastRelationName := relationParts[len(relationParts)-1] lastRelationName := relationParts[len(relationParts)-1]
// Create a recursive preload with the same configuration // Generate FK-based relation name for children
// but with the relation path extended // Use RecursiveChildKey if available, otherwise fall back to RelatedKey
recursivePreload := preload recursiveFK := preload.RecursiveChildKey
recursivePreload.Relation = preload.Relation + "." + lastRelationName if recursiveFK == "" {
recursiveFK = preload.RelatedKey
}
// Recursively apply preload until we reach depth 5 recursiveRelationName := lastRelationName
if recursiveFK != "" {
// Check if the last relation name already contains the FK suffix
// (this happens when XFiles already generated the FK-based name)
fkUpper := strings.ToUpper(recursiveFK)
expectedSuffix := "_" + fkUpper
if strings.HasSuffix(lastRelationName, expectedSuffix) {
// Already has FK suffix, just reuse the same name
recursiveRelationName = lastRelationName
logger.Debug("Reusing FK-based relation name for recursion: %s", recursiveRelationName)
} else {
// Generate FK-based name
recursiveRelationName = lastRelationName + expectedSuffix
keySource := "RelatedKey"
if preload.RecursiveChildKey != "" {
keySource = "RecursiveChildKey"
}
logger.Debug("Generated recursive relation name from %s: %s (from %s)",
keySource, recursiveRelationName, recursiveFK)
}
} else {
logger.Warn("Recursive preload for %s has no RecursiveChildKey or RelatedKey, falling back to %s.%s",
preload.Relation, preload.Relation, lastRelationName)
}
// Create recursive preload
recursivePreload := preload
recursivePreload.Relation = preload.Relation + "." + recursiveRelationName
recursivePreload.Recursive = false // Prevent infinite recursion at this level
// Use the recursive FK for child relations, not the parent's RelatedKey
if preload.RecursiveChildKey != "" {
recursivePreload.RelatedKey = preload.RecursiveChildKey
}
// CRITICAL: Clear parent's WHERE clause - let Bun use FK traversal
recursivePreload.Where = ""
recursivePreload.Filters = []common.FilterOption{}
logger.Debug("Cleared WHERE clause for recursive preload %s at depth %d",
recursivePreload.Relation, depth+1)
// Apply recursively up to depth 8
query = h.applyPreloadWithRecursion(query, recursivePreload, allPreloads, model, depth+1) query = h.applyPreloadWithRecursion(query, recursivePreload, allPreloads, model, depth+1)
// ALSO: Extend any child relations (like DEF) to recursive levels
baseRelation := preload.Relation + "."
for i := range allPreloads {
relatedPreload := allPreloads[i]
if strings.HasPrefix(relatedPreload.Relation, baseRelation) &&
!strings.Contains(strings.TrimPrefix(relatedPreload.Relation, baseRelation), ".") {
childRelationName := strings.TrimPrefix(relatedPreload.Relation, baseRelation)
extendedChildPreload := relatedPreload
extendedChildPreload.Relation = recursivePreload.Relation + "." + childRelationName
extendedChildPreload.Recursive = false
logger.Debug("Extending related preload '%s' to '%s' at recursive depth %d",
relatedPreload.Relation, extendedChildPreload.Relation, depth+1)
query = h.applyPreloadWithRecursion(query, extendedChildPreload, allPreloads, model, depth+1)
}
}
} }
return query return query
@@ -1996,6 +2119,99 @@ func (h *Handler) applyFilter(query common.SelectQuery, filter common.FilterOpti
} }
} }
// applyOrFilterGroup applies a group of OR filters as a single grouped condition
// This ensures OR conditions are properly grouped with parentheses to prevent OR logic from escaping
func (h *Handler) applyOrFilterGroup(query common.SelectQuery, filters []*common.FilterOption, castInfo []ColumnCastInfo, tableName string) common.SelectQuery {
if len(filters) == 0 {
return query
}
// Build individual filter conditions
conditions := []string{}
args := []interface{}{}
for i, filter := range filters {
// Qualify the column name with table name if not already qualified
qualifiedColumn := h.qualifyColumnName(filter.Column, tableName)
// Apply casting to text if needed for non-numeric columns or non-numeric values
if castInfo[i].NeedsCast {
qualifiedColumn = fmt.Sprintf("CAST(%s AS TEXT)", qualifiedColumn)
}
// Build the condition based on operator
condition, filterArgs := h.buildFilterCondition(qualifiedColumn, filter, tableName)
if condition != "" {
conditions = append(conditions, condition)
args = append(args, filterArgs...)
}
}
if len(conditions) == 0 {
return query
}
// Join all conditions with OR and wrap in parentheses
groupedCondition := "(" + strings.Join(conditions, " OR ") + ")"
logger.Debug("Applying grouped OR conditions: %s", groupedCondition)
// Apply as AND condition (the OR is already inside the parentheses)
return query.Where(groupedCondition, args...)
}
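// Illustrative sketch (assumed inputs): two eq filters on columns "status" and "owner" against
// table "tasks" are qualified and grouped as "(tasks.status = ? OR tasks.owner = ?)" and applied
// through query.Where, so the OR logic stays inside the parentheses and is ANDed with other conditions.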
// buildFilterCondition builds a single filter condition and returns the condition string and args
func (h *Handler) buildFilterCondition(qualifiedColumn string, filter *common.FilterOption, tableName string) (filterStr string, filterInterface []interface{}) {
switch strings.ToLower(filter.Operator) {
case "eq", "equals":
return fmt.Sprintf("%s = ?", qualifiedColumn), []interface{}{filter.Value}
case "neq", "not_equals", "ne":
return fmt.Sprintf("%s != ?", qualifiedColumn), []interface{}{filter.Value}
case "gt", "greater_than":
return fmt.Sprintf("%s > ?", qualifiedColumn), []interface{}{filter.Value}
case "gte", "greater_than_equals", "ge":
return fmt.Sprintf("%s >= ?", qualifiedColumn), []interface{}{filter.Value}
case "lt", "less_than":
return fmt.Sprintf("%s < ?", qualifiedColumn), []interface{}{filter.Value}
case "lte", "less_than_equals", "le":
return fmt.Sprintf("%s <= ?", qualifiedColumn), []interface{}{filter.Value}
case "like":
return fmt.Sprintf("%s LIKE ?", qualifiedColumn), []interface{}{filter.Value}
case "ilike":
return fmt.Sprintf("%s ILIKE ?", qualifiedColumn), []interface{}{filter.Value}
case "in":
return fmt.Sprintf("%s IN (?)", qualifiedColumn), []interface{}{filter.Value}
case "between":
// Handle between operator - exclusive (> val1 AND < val2)
if values, ok := filter.Value.([]interface{}); ok && len(values) == 2 {
return fmt.Sprintf("(%s > ? AND %s < ?)", qualifiedColumn, qualifiedColumn), []interface{}{values[0], values[1]}
} else if values, ok := filter.Value.([]string); ok && len(values) == 2 {
return fmt.Sprintf("(%s > ? AND %s < ?)", qualifiedColumn, qualifiedColumn), []interface{}{values[0], values[1]}
}
logger.Warn("Invalid BETWEEN filter value format")
return "", nil
case "between_inclusive":
// Handle between inclusive operator - inclusive (>= val1 AND <= val2)
if values, ok := filter.Value.([]interface{}); ok && len(values) == 2 {
return fmt.Sprintf("(%s >= ? AND %s <= ?)", qualifiedColumn, qualifiedColumn), []interface{}{values[0], values[1]}
} else if values, ok := filter.Value.([]string); ok && len(values) == 2 {
return fmt.Sprintf("(%s >= ? AND %s <= ?)", qualifiedColumn, qualifiedColumn), []interface{}{values[0], values[1]}
}
logger.Warn("Invalid BETWEEN INCLUSIVE filter value format")
return "", nil
case "is_null", "isnull":
// Check for NULL values - don't use cast for NULL checks
colName := h.qualifyColumnName(filter.Column, tableName)
return fmt.Sprintf("(%s IS NULL OR %s = '')", colName, colName), nil
case "is_not_null", "isnotnull":
// Check for NOT NULL values - don't use cast for NULL checks
colName := h.qualifyColumnName(filter.Column, tableName)
return fmt.Sprintf("(%s IS NOT NULL AND %s != '')", colName, colName), nil
default:
logger.Warn("Unknown filter operator: %s, defaulting to equals", filter.Operator)
return fmt.Sprintf("%s = ?", qualifiedColumn), []interface{}{filter.Value}
}
}
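// Illustrative sketch (assumed inputs): for a filter {Operator: "between", Value: []interface{}{10, 20}}
// on column "t.age", buildFilterCondition returns "(t.age > ? AND t.age < ?)" with args [10, 20];
// "between_inclusive" yields ">= / <=" instead, and an unknown operator falls back to "t.age = ?".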
// parseTableName splits a table name that may contain schema into separate schema and table
func (h *Handler) parseTableName(fullTableName string) (schema, table string) {
if idx := strings.LastIndex(fullTableName, "."); idx != -1 {

View File

@@ -28,6 +28,7 @@ type ExtendedRequestOptions struct {
// Joins
Expand []ExpandOption
CustomSQLJoin []string // Custom SQL JOIN clauses
JoinAliases []string // Extracted table aliases from CustomSQLJoin for validation
// Advanced features
AdvancedSQL map[string]string // Column -> SQL expression
@@ -47,7 +48,8 @@ type ExtendedRequestOptions struct {
AtomicTransaction bool
// X-Files configuration - comprehensive query options as a single JSON object
XFiles *XFiles
XFilesPresent bool // Flag to indicate if X-Files header was provided
}
// ExpandOption represents a relation expansion configuration
@@ -273,7 +275,8 @@ func (h *Handler) parseOptionsFromHeaders(r common.Request, model interface{}) E
}
// Resolve relation names (convert table names to field names) if model is provided
// Skip resolution if X-Files header was provided, as XFiles uses Prefix which already contains the correct field names
if model != nil && !options.XFilesPresent {
h.resolveRelationNamesInOptions(&options, model)
}
@@ -528,11 +531,69 @@ func (h *Handler) parseCustomSQLJoin(options *ExtendedRequestOptions, value stri
continue
}
// Extract table alias from the JOIN clause
alias := extractJoinAlias(sanitizedJoin)
if alias != "" {
options.JoinAliases = append(options.JoinAliases, alias)
// Also add to the embedded RequestOptions for validation
options.RequestOptions.JoinAliases = append(options.RequestOptions.JoinAliases, alias)
logger.Debug("Extracted join alias: %s", alias)
}
logger.Debug("Adding custom SQL join: %s", sanitizedJoin) logger.Debug("Adding custom SQL join: %s", sanitizedJoin)
options.CustomSQLJoin = append(options.CustomSQLJoin, sanitizedJoin) options.CustomSQLJoin = append(options.CustomSQLJoin, sanitizedJoin)
} }
} }
// extractJoinAlias extracts the table alias from a JOIN clause
// Examples:
// - "LEFT JOIN departments d ON ..." -> "d"
// - "INNER JOIN users AS u ON ..." -> "u"
// - "JOIN roles r ON ..." -> "r"
func extractJoinAlias(joinClause string) string {
// Pattern: JOIN table_name [AS] alias ON ...
// We need to extract the alias (word before ON)
upperJoin := strings.ToUpper(joinClause)
// Find the "JOIN" keyword position
joinIdx := strings.Index(upperJoin, "JOIN")
if joinIdx == -1 {
return ""
}
// Find the "ON" keyword position
onIdx := strings.Index(upperJoin, " ON ")
if onIdx == -1 {
return ""
}
// Extract the part between JOIN and ON
betweenJoinAndOn := strings.TrimSpace(joinClause[joinIdx+4 : onIdx])
// Split by spaces to get words
words := strings.Fields(betweenJoinAndOn)
if len(words) == 0 {
return ""
}
// If there's an AS keyword, the alias is after it
for i, word := range words {
if strings.EqualFold(word, "AS") && i+1 < len(words) {
return words[i+1]
}
}
// Otherwise, the alias is the last word (if there are 2+ words)
// Format: "table_name alias" or just "table_name"
if len(words) >= 2 {
return words[len(words)-1]
}
// Only one word means it's just the table name, no alias
return ""
}
// parseSorting parses x-sort header
// Format: +field1,-field2,field3 (+ for ASC, - for DESC, default ASC)
func (h *Handler) parseSorting(options *ExtendedRequestOptions, value string) {
@@ -634,6 +695,7 @@ func (h *Handler) parseXFiles(options *ExtendedRequestOptions, value string) {
// Store the original XFiles for reference
options.XFiles = &xfiles
options.XFilesPresent = true // Mark that X-Files header was provided
// Map XFiles fields to ExtendedRequestOptions
@@ -925,11 +987,33 @@ func (h *Handler) addXFilesPreload(xfile *XFiles, options *ExtendedRequestOption
return
}
// Use the Prefix (e.g., "MAL") as the relation name, which matches the Go struct field name
// Fall back to TableName if Prefix is not specified
relationName := xfile.Prefix
if relationName == "" {
relationName = xfile.TableName
}
// SPECIAL CASE: For recursive child tables, generate FK-based relation name
// Example: If prefix is "MAL" and relatedkey is "rid_parentmastertaskitem",
// the actual struct field is "MAL_RID_PARENTMASTERTASKITEM", not "MAL"
if xfile.Recursive && xfile.RelatedKey != "" && basePath != "" {
// Check if this is a self-referencing recursive relation (same table as parent)
// by comparing the last part of basePath with the current prefix
basePathParts := strings.Split(basePath, ".")
lastPrefix := basePathParts[len(basePathParts)-1]
if lastPrefix == relationName {
// This is a recursive self-reference, use FK-based name
fkUpper := strings.ToUpper(xfile.RelatedKey)
relationName = relationName + "_" + fkUpper
logger.Debug("X-Files: Generated FK-based relation name for recursive table: %s", relationName)
}
}
relationPath := relationName
if basePath != "" { if basePath != "" {
relationPath = basePath + "." + xfile.TableName relationPath = basePath + "." + relationName
} }
logger.Debug("X-Files: Adding preload for relation: %s", relationPath) logger.Debug("X-Files: Adding preload for relation: %s", relationPath)
@@ -937,6 +1021,7 @@ func (h *Handler) addXFilesPreload(xfile *XFiles, options *ExtendedRequestOption
// Create PreloadOption from XFiles configuration
preloadOpt := common.PreloadOption{
Relation: relationPath,
TableName: xfile.TableName, // Store the actual database table name for WHERE clause processing
Columns: xfile.Columns,
OmitColumns: xfile.OmitColumns,
}
@@ -979,12 +1064,12 @@ func (h *Handler) addXFilesPreload(xfile *XFiles, options *ExtendedRequestOption
// Add WHERE clause if SQL conditions specified
whereConditions := make([]string, 0)
if len(xfile.SqlAnd) > 0 {
// Process each SQL condition
// Note: We don't add table prefixes here because they're only needed for JOINs
// The handler will add prefixes later if SqlJoins are present
for _, sqlCond := range xfile.SqlAnd {
// Sanitize the condition without adding prefixes
sanitizedCond := common.SanitizeWhereClause(sqlCond, xfile.TableName)
if sanitizedCond != "" {
whereConditions = append(whereConditions, sanitizedCond)
}
@@ -1029,13 +1114,72 @@ func (h *Handler) addXFilesPreload(xfile *XFiles, options *ExtendedRequestOption
logger.Debug("X-Files: Set foreign key for %s: %s", relationPath, xfile.ForeignKey) logger.Debug("X-Files: Set foreign key for %s: %s", relationPath, xfile.ForeignKey)
} }
// Transfer SqlJoins from XFiles to PreloadOption
if len(xfile.SqlJoins) > 0 {
preloadOpt.SqlJoins = make([]string, 0, len(xfile.SqlJoins))
preloadOpt.JoinAliases = make([]string, 0, len(xfile.SqlJoins))
for _, joinClause := range xfile.SqlJoins {
// Sanitize the join clause
sanitizedJoin := common.SanitizeWhereClause(joinClause, "", nil)
if sanitizedJoin == "" {
logger.Warn("X-Files: SqlJoin failed sanitization for %s: %s", relationPath, joinClause)
continue
}
preloadOpt.SqlJoins = append(preloadOpt.SqlJoins, sanitizedJoin)
// Extract join alias for validation
alias := extractJoinAlias(sanitizedJoin)
if alias != "" {
preloadOpt.JoinAliases = append(preloadOpt.JoinAliases, alias)
logger.Debug("X-Files: Extracted join alias for %s: %s", relationPath, alias)
}
}
logger.Debug("X-Files: Added %d SQL joins to preload %s", len(preloadOpt.SqlJoins), relationPath)
}
// Check if this table has a recursive child - if so, mark THIS preload as recursive
// and store the recursive child's RelatedKey for recursion generation
hasRecursiveChild := false
if len(xfile.ChildTables) > 0 {
for _, childTable := range xfile.ChildTables {
if childTable.Recursive && childTable.TableName == xfile.TableName {
hasRecursiveChild = true
preloadOpt.Recursive = true
preloadOpt.RecursiveChildKey = childTable.RelatedKey
logger.Debug("X-Files: Detected recursive child for %s, marking parent as recursive (recursive FK: %s)",
relationPath, childTable.RelatedKey)
break
}
}
}
// Skip adding this preload if it's a recursive child (it will be handled by parent's Recursive flag)
if xfile.Recursive && basePath != "" {
logger.Debug("X-Files: Skipping recursive child preload: %s (will be handled by parent)", relationPath)
// Still process its parent/child tables for relations like DEF
h.processXFilesRelations(xfile, options, relationPath)
return
}
// Add the preload option
options.Preload = append(options.Preload, preloadOpt)
logger.Debug("X-Files: Added preload [%d]: Relation=%s, Recursive=%v, RelatedKey=%s, RecursiveChildKey=%s, Where=%s",
len(options.Preload)-1, preloadOpt.Relation, preloadOpt.Recursive, preloadOpt.RelatedKey, preloadOpt.RecursiveChildKey, preloadOpt.Where)
// Recursively process nested ParentTables and ChildTables
// Skip processing child tables if we already detected and handled a recursive child
if hasRecursiveChild {
logger.Debug("X-Files: Skipping child table processing for %s (recursive child already handled)", relationPath)
// But still process parent tables
if len(xfile.ParentTables) > 0 {
logger.Debug("X-Files: Processing %d parent tables for %s", len(xfile.ParentTables), relationPath)
for _, parentTable := range xfile.ParentTables {
h.addXFilesPreload(parentTable, options, relationPath)
}
}
} else if len(xfile.ParentTables) > 0 || len(xfile.ChildTables) > 0 {
h.processXFilesRelations(xfile, options, relationPath)
}
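// Illustrative sketch (hypothetical config; assumes ChildTables nests further XFiles entries):
// a parent {TableName: "mastertaskitem", Prefix: "MAL", RelatedKey: "rid_mastertask",
// SqlAnd: []string{"rid_parentmastertaskitem is null"}} whose child table is
// {TableName: "mastertaskitem", Recursive: true, RelatedKey: "rid_parentmastertaskitem"}
// is detected as a self-reference: the parent preload gets Recursive = true and
// RecursiveChildKey = "rid_parentmastertaskitem", while the recursive child itself is skipped here.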

View File

@@ -2,6 +2,8 @@ package restheadspec
import (
"testing"
"github.com/bitechdev/ResolveSpec/pkg/common"
)
func TestDecodeHeaderValue(t *testing.T) {
@@ -37,6 +39,121 @@ func TestDecodeHeaderValue(t *testing.T) {
}
}
func TestAddXFilesPreload_WithSqlJoins(t *testing.T) {
handler := &Handler{}
options := &ExtendedRequestOptions{
RequestOptions: common.RequestOptions{
Preload: make([]common.PreloadOption, 0),
},
}
// Create an XFiles with SqlJoins
xfile := &XFiles{
TableName: "users",
SqlJoins: []string{
"LEFT JOIN departments d ON d.id = users.department_id",
"INNER JOIN roles r ON r.id = users.role_id",
},
FilterFields: []struct {
Field string `json:"field"`
Value string `json:"value"`
Operator string `json:"operator"`
}{
{Field: "d.active", Value: "true", Operator: "eq"},
{Field: "r.name", Value: "admin", Operator: "eq"},
},
}
// Add the XFiles preload
handler.addXFilesPreload(xfile, options, "")
// Verify that a preload was added
if len(options.Preload) != 1 {
t.Fatalf("Expected 1 preload, got %d", len(options.Preload))
}
preload := options.Preload[0]
// Verify relation name
if preload.Relation != "users" {
t.Errorf("Expected relation 'users', got '%s'", preload.Relation)
}
// Verify SqlJoins were transferred
if len(preload.SqlJoins) != 2 {
t.Fatalf("Expected 2 SQL joins, got %d", len(preload.SqlJoins))
}
// Verify JoinAliases were extracted
if len(preload.JoinAliases) != 2 {
t.Fatalf("Expected 2 join aliases, got %d", len(preload.JoinAliases))
}
// Verify the aliases are correct
expectedAliases := []string{"d", "r"}
for i, expected := range expectedAliases {
if preload.JoinAliases[i] != expected {
t.Errorf("Expected alias '%s', got '%s'", expected, preload.JoinAliases[i])
}
}
// Verify filters were added
if len(preload.Filters) != 2 {
t.Fatalf("Expected 2 filters, got %d", len(preload.Filters))
}
// Verify filter columns reference joined tables
if preload.Filters[0].Column != "d.active" {
t.Errorf("Expected filter column 'd.active', got '%s'", preload.Filters[0].Column)
}
if preload.Filters[1].Column != "r.name" {
t.Errorf("Expected filter column 'r.name', got '%s'", preload.Filters[1].Column)
}
}
func TestExtractJoinAlias(t *testing.T) {
tests := []struct {
name string
joinClause string
expected string
}{
{
name: "LEFT JOIN with alias",
joinClause: "LEFT JOIN departments d ON d.id = users.department_id",
expected: "d",
},
{
name: "INNER JOIN with AS keyword",
joinClause: "INNER JOIN users AS u ON u.id = orders.user_id",
expected: "u",
},
{
name: "JOIN without alias",
joinClause: "JOIN roles ON roles.id = users.role_id",
expected: "",
},
{
name: "Complex join with multiple conditions",
joinClause: "LEFT OUTER JOIN products p ON p.id = items.product_id AND p.active = true",
expected: "p",
},
{
name: "Invalid join (no ON clause)",
joinClause: "LEFT JOIN departments",
expected: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := extractJoinAlias(tt.joinClause)
if result != tt.expected {
t.Errorf("Expected alias '%s', got '%s'", tt.expected, result)
}
})
}
}
// Note: The following functions are unexported (lowercase) and cannot be tested directly:
// - parseSelectFields
// - parseFieldFilter

View File

@@ -0,0 +1,110 @@
package restheadspec
import (
"testing"
"github.com/bitechdev/ResolveSpec/pkg/common"
)
// TestPreloadOption_TableName verifies that TableName field is properly used
// when provided in PreloadOption for WHERE clause processing
func TestPreloadOption_TableName(t *testing.T) {
tests := []struct {
name string
preload common.PreloadOption
expectedTable string
}{
{
name: "TableName provided explicitly",
preload: common.PreloadOption{
Relation: "MTL.MAL.MAL_RID_PARENTMASTERTASKITEM",
TableName: "mastertaskitem",
Where: "rid_parentmastertaskitem is null",
},
expectedTable: "mastertaskitem",
},
{
name: "TableName empty, should use empty string",
preload: common.PreloadOption{
Relation: "MTL.MAL.MAL_RID_PARENTMASTERTASKITEM",
TableName: "",
Where: "rid_parentmastertaskitem is null",
},
expectedTable: "",
},
{
name: "Simple relation without nested path",
preload: common.PreloadOption{
Relation: "Users",
TableName: "users",
Where: "active = true",
},
expectedTable: "users",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Test that the TableName field stores the correct value
if tt.preload.TableName != tt.expectedTable {
t.Errorf("PreloadOption.TableName = %q, want %q", tt.preload.TableName, tt.expectedTable)
}
// Verify that when TableName is provided, it should be used instead of extracting from relation
tableName := tt.preload.TableName
if tableName == "" {
// This simulates the fallback logic in handler.go
// In reality, reflection.ExtractTableNameOnly would be called
tableName = tt.expectedTable
}
if tableName != tt.expectedTable {
t.Errorf("Resolved table name = %q, want %q", tableName, tt.expectedTable)
}
})
}
}
// TestXFilesPreload_StoresTableName verifies that XFiles processing
// stores the table name in PreloadOption and doesn't add table prefixes to WHERE clauses
func TestXFilesPreload_StoresTableName(t *testing.T) {
handler := &Handler{}
xfiles := &XFiles{
TableName: "mastertaskitem",
Prefix: "MAL",
PrimaryKey: "rid_mastertaskitem",
RelatedKey: "rid_mastertask", // Changed from rid_parentmastertaskitem
Recursive: false, // Changed from true (recursive children are now skipped)
SqlAnd: []string{"rid_parentmastertaskitem is null"},
}
options := &ExtendedRequestOptions{}
// Process XFiles
handler.addXFilesPreload(xfiles, options, "MTL")
// Verify that a preload was added
if len(options.Preload) == 0 {
t.Fatal("Expected at least one preload to be added")
}
preload := options.Preload[0]
// Verify the table name is stored
if preload.TableName != "mastertaskitem" {
t.Errorf("PreloadOption.TableName = %q, want %q", preload.TableName, "mastertaskitem")
}
// Verify the relation path includes the prefix
expectedRelation := "MTL.MAL"
if preload.Relation != expectedRelation {
t.Errorf("PreloadOption.Relation = %q, want %q", preload.Relation, expectedRelation)
}
// Verify WHERE clause does NOT have table prefix (prefixes only needed for JOINs)
expectedWhere := "rid_parentmastertaskitem is null"
if preload.Where != expectedWhere {
t.Errorf("PreloadOption.Where = %q, want %q (no table prefix)", preload.Where, expectedWhere)
}
}

View File

@@ -0,0 +1,91 @@
package restheadspec
import (
"testing"
)
// TestPreloadWhereClause_WithJoins verifies that table prefixes are added
// to WHERE clauses when SqlJoins are present
func TestPreloadWhereClause_WithJoins(t *testing.T) {
tests := []struct {
name string
where string
sqlJoins []string
expectedPrefix bool
description string
}{
{
name: "No joins - no prefix needed",
where: "status = 'active'",
sqlJoins: []string{},
expectedPrefix: false,
description: "Without JOINs, Bun knows the table context",
},
{
name: "Has joins - prefix needed",
where: "status = 'active'",
sqlJoins: []string{"LEFT JOIN other_table ot ON ot.id = main.other_id"},
expectedPrefix: true,
description: "With JOINs, table prefix disambiguates columns",
},
{
name: "Already has prefix - no change",
where: "users.status = 'active'",
sqlJoins: []string{"LEFT JOIN roles r ON r.id = users.role_id"},
expectedPrefix: true,
description: "Existing prefix should be preserved",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// This test documents the expected behavior
// The actual logic is in handler.go lines 916-937
hasJoins := len(tt.sqlJoins) > 0
if hasJoins != tt.expectedPrefix {
t.Errorf("Test expectation mismatch: hasJoins=%v, expectedPrefix=%v",
hasJoins, tt.expectedPrefix)
}
t.Logf("%s: %s", tt.name, tt.description)
})
}
}
// TestXFilesWithJoins_AddsTablePrefix verifies that XFiles with SqlJoins
// results in table prefixes being added to WHERE clauses
func TestXFilesWithJoins_AddsTablePrefix(t *testing.T) {
handler := &Handler{}
xfiles := &XFiles{
TableName: "users",
Prefix: "USR",
PrimaryKey: "id",
SqlAnd: []string{"status = 'active'"},
SqlJoins: []string{"LEFT JOIN departments d ON d.id = users.department_id"},
}
options := &ExtendedRequestOptions{}
handler.addXFilesPreload(xfiles, options, "")
if len(options.Preload) == 0 {
t.Fatal("Expected at least one preload to be added")
}
preload := options.Preload[0]
// Verify SqlJoins were stored
if len(preload.SqlJoins) != 1 {
t.Errorf("Expected 1 SqlJoin, got %d", len(preload.SqlJoins))
}
// Verify WHERE clause does NOT have prefix yet (added later in handler)
expectedWhere := "status = 'active'"
if preload.Where != expectedWhere {
t.Errorf("PreloadOption.Where = %q, want %q", preload.Where, expectedWhere)
}
// Note: The handler will add the prefix when it sees SqlJoins
// This is tested in the handler itself, not during XFiles parsing
}

View File

@@ -357,6 +357,107 @@ func TestParseOptionsFromQueryParams(t *testing.T) {
}
},
},
{
name: "Extract aliases from custom SQL JOIN",
queryParams: map[string]string{
"x-custom-sql-join": `LEFT JOIN departments d ON d.id = employees.department_id`,
},
validate: func(t *testing.T, options ExtendedRequestOptions) {
if len(options.JoinAliases) == 0 {
t.Error("Expected JoinAliases to be extracted")
return
}
if len(options.JoinAliases) != 1 {
t.Errorf("Expected 1 join alias, got %d", len(options.JoinAliases))
return
}
if options.JoinAliases[0] != "d" {
t.Errorf("Expected join alias 'd', got %q", options.JoinAliases[0])
}
// Also check that it's in the embedded RequestOptions
if len(options.RequestOptions.JoinAliases) != 1 || options.RequestOptions.JoinAliases[0] != "d" {
t.Error("Expected join alias to also be in RequestOptions.JoinAliases")
}
},
},
{
name: "Extract multiple aliases from multiple custom SQL JOINs",
queryParams: map[string]string{
"x-custom-sql-join": `LEFT JOIN departments d ON d.id = e.dept_id | INNER JOIN roles AS r ON r.id = e.role_id`,
},
validate: func(t *testing.T, options ExtendedRequestOptions) {
if len(options.JoinAliases) != 2 {
t.Errorf("Expected 2 join aliases, got %d", len(options.JoinAliases))
return
}
expectedAliases := []string{"d", "r"}
for i, expected := range expectedAliases {
if options.JoinAliases[i] != expected {
t.Errorf("Expected join alias[%d]=%q, got %q", i, expected, options.JoinAliases[i])
}
}
},
},
{
name: "Custom JOIN with sort on joined table",
queryParams: map[string]string{
"x-custom-sql-join": `LEFT JOIN departments d ON d.id = employees.department_id`,
"x-sort": "d.name,employees.id",
},
validate: func(t *testing.T, options ExtendedRequestOptions) {
// Verify join was added
if len(options.CustomSQLJoin) != 1 {
t.Errorf("Expected 1 custom SQL join, got %d", len(options.CustomSQLJoin))
return
}
// Verify alias was extracted
if len(options.JoinAliases) != 1 || options.JoinAliases[0] != "d" {
t.Error("Expected join alias 'd' to be extracted")
return
}
// Verify sort was parsed
if len(options.Sort) != 2 {
t.Errorf("Expected 2 sort options, got %d", len(options.Sort))
return
}
if options.Sort[0].Column != "d.name" {
t.Errorf("Expected first sort column 'd.name', got %q", options.Sort[0].Column)
}
if options.Sort[1].Column != "employees.id" {
t.Errorf("Expected second sort column 'employees.id', got %q", options.Sort[1].Column)
}
},
},
{
name: "Custom JOIN with filter on joined table",
queryParams: map[string]string{
"x-custom-sql-join": `LEFT JOIN departments d ON d.id = employees.department_id`,
"x-searchop-eq-d.name": "Engineering",
},
validate: func(t *testing.T, options ExtendedRequestOptions) {
// Verify join was added
if len(options.CustomSQLJoin) != 1 {
t.Error("Expected 1 custom SQL join")
return
}
// Verify alias was extracted
if len(options.JoinAliases) != 1 || options.JoinAliases[0] != "d" {
t.Error("Expected join alias 'd' to be extracted")
return
}
// Verify filter was parsed
if len(options.Filters) != 1 {
t.Errorf("Expected 1 filter, got %d", len(options.Filters))
return
}
if options.Filters[0].Column != "d.name" {
t.Errorf("Expected filter column 'd.name', got %q", options.Filters[0].Column)
}
if options.Filters[0].Operator != "eq" {
t.Errorf("Expected filter operator 'eq', got %q", options.Filters[0].Operator)
}
},
},
}
for _, tt := range tests {
@@ -451,6 +552,55 @@ func TestHeadersAndQueryParamsCombined(t *testing.T) {
}
}
// TestCustomJoinAliasExtraction tests the extractJoinAlias helper function
func TestCustomJoinAliasExtraction(t *testing.T) {
tests := []struct {
name string
join string
expected string
}{
{
name: "LEFT JOIN with alias",
join: "LEFT JOIN departments d ON d.id = employees.department_id",
expected: "d",
},
{
name: "INNER JOIN with AS keyword",
join: "INNER JOIN users AS u ON u.id = posts.user_id",
expected: "u",
},
{
name: "Simple JOIN with alias",
join: "JOIN roles r ON r.id = user_roles.role_id",
expected: "r",
},
{
name: "JOIN without alias (just table name)",
join: "JOIN departments ON departments.id = employees.dept_id",
expected: "",
},
{
name: "RIGHT JOIN with alias",
join: "RIGHT JOIN orders o ON o.customer_id = customers.id",
expected: "o",
},
{
name: "FULL OUTER JOIN with AS",
join: "FULL OUTER JOIN products AS p ON p.id = order_items.product_id",
expected: "p",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := extractJoinAlias(tt.join)
if result != tt.expected {
t.Errorf("extractJoinAlias(%q) = %q, want %q", tt.join, result, tt.expected)
}
})
}
}
// Helper function to check if a string contains a substring
func contains(s, substr string) bool {
return len(s) >= len(substr) && (s == substr || len(s) > len(substr) && containsHelper(s, substr))

View File

@@ -0,0 +1,391 @@
//go:build !integration
// +build !integration
package restheadspec
import (
"context"
"testing"
"github.com/bitechdev/ResolveSpec/pkg/common"
)
// TestRecursivePreloadClearsWhereClause tests that recursive preloads
// correctly clear the WHERE clause from the parent level to allow
// Bun to use foreign key relationships for loading children
func TestRecursivePreloadClearsWhereClause(t *testing.T) {
// Create a mock handler
handler := &Handler{}
// Create a preload option with a WHERE clause that filters root items
// This simulates the xfiles use case where the first level has a filter
// like "rid_parentmastertaskitem is null" to get root items
preload := common.PreloadOption{
Relation: "MastertaskItems",
Recursive: true,
RelatedKey: "rid_parentmastertaskitem",
Where: "rid_parentmastertaskitem is null",
Filters: []common.FilterOption{
{
Column: "rid_parentmastertaskitem",
Operator: "is null",
Value: nil,
},
},
}
// Create a mock query that tracks operations
mockQuery := &mockSelectQuery{
operations: []string{},
}
// Apply the recursive preload at depth 0
// This should:
// 1. Apply the initial preload with the WHERE clause
// 2. Create a recursive preload without the WHERE clause
allPreloads := []common.PreloadOption{preload}
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 0)
// Verify the mock query received the operations
mock := result.(*mockSelectQuery)
// Check that we have at least 2 PreloadRelation calls:
// 1. The initial "MastertaskItems" with WHERE clause
// 2. The recursive "MastertaskItems.MastertaskItems_RID_PARENTMASTERTASKITEM" without WHERE clause
preloadCount := 0
recursivePreloadFound := false
whereAppliedToRecursive := false
for _, op := range mock.operations {
if op == "PreloadRelation:MastertaskItems" {
preloadCount++
}
if op == "PreloadRelation:MastertaskItems.MastertaskItems_RID_PARENTMASTERTASKITEM" {
recursivePreloadFound = true
}
// Check if WHERE was applied to the recursive preload (it shouldn't be)
if op == "Where:rid_parentmastertaskitem is null" && recursivePreloadFound {
whereAppliedToRecursive = true
}
}
if preloadCount < 1 {
t.Errorf("Expected at least 1 PreloadRelation call, got %d", preloadCount)
}
if !recursivePreloadFound {
t.Errorf("Expected recursive preload 'MastertaskItems.MastertaskItems_RID_PARENTMASTERTASKITEM' to be created. Operations: %v", mock.operations)
}
if whereAppliedToRecursive {
t.Error("WHERE clause should not be applied to recursive preload levels")
}
}
// TestRecursivePreloadWithChildRelations tests that child relations
// (like DEF in MAL.DEF) are properly extended to recursive levels
func TestRecursivePreloadWithChildRelations(t *testing.T) {
handler := &Handler{}
// Create the main recursive preload
recursivePreload := common.PreloadOption{
Relation: "MAL",
Recursive: true,
RelatedKey: "rid_parentmastertaskitem",
Where: "rid_parentmastertaskitem is null",
}
// Create a child relation that should be extended
childPreload := common.PreloadOption{
Relation: "MAL.DEF",
}
mockQuery := &mockSelectQuery{
operations: []string{},
}
allPreloads := []common.PreloadOption{recursivePreload, childPreload}
// Apply both preloads - the child preload should be extended when the recursive one processes
result := handler.applyPreloadWithRecursion(mockQuery, recursivePreload, allPreloads, nil, 0)
// Also need to apply the child preload separately (as would happen in normal flow)
result = handler.applyPreloadWithRecursion(result, childPreload, allPreloads, nil, 0)
mock := result.(*mockSelectQuery)
// Check that the child relation was extended to recursive levels
// We should see:
// - MAL (with WHERE)
// - MAL.DEF
// - MAL.MAL_RID_PARENTMASTERTASKITEM (without WHERE)
// - MAL.MAL_RID_PARENTMASTERTASKITEM.DEF (extended by recursive logic)
foundMALDEF := false
foundRecursiveMAL := false
foundMALMALDEF := false
for _, op := range mock.operations {
if op == "PreloadRelation:MAL.DEF" {
foundMALDEF = true
}
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
foundRecursiveMAL = true
}
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM.DEF" {
foundMALMALDEF = true
}
}
if !foundMALDEF {
t.Errorf("Expected child preload 'MAL.DEF' to be applied. Operations: %v", mock.operations)
}
if !foundRecursiveMAL {
t.Errorf("Expected recursive preload 'MAL.MAL_RID_PARENTMASTERTASKITEM' to be created. Operations: %v", mock.operations)
}
if !foundMALMALDEF {
t.Errorf("Expected child preload to be extended to 'MAL.MAL_RID_PARENTMASTERTASKITEM.DEF' at recursive level. Operations: %v", mock.operations)
}
}
// TestRecursivePreloadGeneratesCorrectRelationName tests that the recursive
// preload generates the correct FK-based relation name using RelatedKey
func TestRecursivePreloadGeneratesCorrectRelationName(t *testing.T) {
handler := &Handler{}
// Test case 1: With RelatedKey - should generate FK-based name
t.Run("WithRelatedKey", func(t *testing.T) {
preload := common.PreloadOption{
Relation: "MAL",
Recursive: true,
RelatedKey: "rid_parentmastertaskitem",
}
mockQuery := &mockSelectQuery{operations: []string{}}
allPreloads := []common.PreloadOption{preload}
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 0)
mock := result.(*mockSelectQuery)
// Should generate MAL.MAL_RID_PARENTMASTERTASKITEM
foundCorrectRelation := false
foundIncorrectRelation := false
for _, op := range mock.operations {
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
foundCorrectRelation = true
}
if op == "PreloadRelation:MAL.MAL" {
foundIncorrectRelation = true
}
}
if !foundCorrectRelation {
t.Errorf("Expected 'MAL.MAL_RID_PARENTMASTERTASKITEM' relation, operations: %v", mock.operations)
}
if foundIncorrectRelation {
t.Error("Should NOT generate 'MAL.MAL' relation when RelatedKey is specified")
}
})
// Test case 2: Without RelatedKey - should fallback to old behavior
t.Run("WithoutRelatedKey", func(t *testing.T) {
preload := common.PreloadOption{
Relation: "MAL",
Recursive: true,
// No RelatedKey
}
mockQuery := &mockSelectQuery{operations: []string{}}
allPreloads := []common.PreloadOption{preload}
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 0)
mock := result.(*mockSelectQuery)
// Should fallback to MAL.MAL
foundFallback := false
for _, op := range mock.operations {
if op == "PreloadRelation:MAL.MAL" {
foundFallback = true
}
}
if !foundFallback {
t.Errorf("Expected fallback 'MAL.MAL' relation when no RelatedKey, operations: %v", mock.operations)
}
})
// Test case 3: Depth limit of 8
t.Run("DepthLimit", func(t *testing.T) {
preload := common.PreloadOption{
Relation: "MAL",
Recursive: true,
RelatedKey: "rid_parentmastertaskitem",
}
mockQuery := &mockSelectQuery{operations: []string{}}
allPreloads := []common.PreloadOption{preload}
// Start at depth 7 - should create one more level
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 7)
mock := result.(*mockSelectQuery)
foundDepth8 := false
for _, op := range mock.operations {
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
foundDepth8 = true
}
}
if !foundDepth8 {
t.Error("Expected to create recursive level at depth 8")
}
// Start at depth 8 - should NOT create another level
mockQuery2 := &mockSelectQuery{operations: []string{}}
result2 := handler.applyPreloadWithRecursion(mockQuery2, preload, allPreloads, nil, 8)
mock2 := result2.(*mockSelectQuery)
foundDepth9 := false
for _, op := range mock2.operations {
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
foundDepth9 = true
}
}
if foundDepth9 {
t.Error("Should NOT create recursive level beyond depth 8")
}
})
}
// mockSelectQuery implements common.SelectQuery for testing
type mockSelectQuery struct {
operations []string
}
func (m *mockSelectQuery) Model(model interface{}) common.SelectQuery {
m.operations = append(m.operations, "Model")
return m
}
func (m *mockSelectQuery) Table(table string) common.SelectQuery {
m.operations = append(m.operations, "Table:"+table)
return m
}
func (m *mockSelectQuery) Column(columns ...string) common.SelectQuery {
for _, col := range columns {
m.operations = append(m.operations, "Column:"+col)
}
return m
}
func (m *mockSelectQuery) ColumnExpr(query string, args ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "ColumnExpr:"+query)
return m
}
func (m *mockSelectQuery) Where(query string, args ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "Where:"+query)
return m
}
func (m *mockSelectQuery) WhereOr(query string, args ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "WhereOr:"+query)
return m
}
func (m *mockSelectQuery) WhereIn(column string, values interface{}) common.SelectQuery {
m.operations = append(m.operations, "WhereIn:"+column)
return m
}
func (m *mockSelectQuery) Order(order string) common.SelectQuery {
m.operations = append(m.operations, "Order:"+order)
return m
}
func (m *mockSelectQuery) OrderExpr(order string, args ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "OrderExpr:"+order)
return m
}
func (m *mockSelectQuery) Limit(limit int) common.SelectQuery {
m.operations = append(m.operations, "Limit")
return m
}
func (m *mockSelectQuery) Offset(offset int) common.SelectQuery {
m.operations = append(m.operations, "Offset")
return m
}
func (m *mockSelectQuery) Join(join string, args ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "Join:"+join)
return m
}
func (m *mockSelectQuery) LeftJoin(join string, args ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "LeftJoin:"+join)
return m
}
func (m *mockSelectQuery) Group(columns string) common.SelectQuery {
m.operations = append(m.operations, "Group")
return m
}
func (m *mockSelectQuery) Having(query string, args ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "Having:"+query)
return m
}
func (m *mockSelectQuery) Preload(relation string, conditions ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "Preload:"+relation)
return m
}
func (m *mockSelectQuery) PreloadRelation(relation string, apply ...func(common.SelectQuery) common.SelectQuery) common.SelectQuery {
m.operations = append(m.operations, "PreloadRelation:"+relation)
// Apply the preload modifiers
for _, fn := range apply {
fn(m)
}
return m
}
func (m *mockSelectQuery) JoinRelation(relation string, apply ...func(common.SelectQuery) common.SelectQuery) common.SelectQuery {
m.operations = append(m.operations, "JoinRelation:"+relation)
return m
}
func (m *mockSelectQuery) Scan(ctx context.Context, dest interface{}) error {
m.operations = append(m.operations, "Scan")
return nil
}
func (m *mockSelectQuery) ScanModel(ctx context.Context) error {
m.operations = append(m.operations, "ScanModel")
return nil
}
func (m *mockSelectQuery) Count(ctx context.Context) (int, error) {
m.operations = append(m.operations, "Count")
return 0, nil
}
func (m *mockSelectQuery) Exists(ctx context.Context) (bool, error) {
m.operations = append(m.operations, "Exists")
return false, nil
}
func (m *mockSelectQuery) GetUnderlyingQuery() interface{} {
return nil
}
func (m *mockSelectQuery) GetModel() interface{} {
return nil
}

View File

@@ -0,0 +1,527 @@
//go:build integration
// +build integration
package restheadspec
import (
"context"
"encoding/json"
"os"
"path/filepath"
"testing"
"github.com/bitechdev/ResolveSpec/pkg/common"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// mockSelectQuery implements common.SelectQuery for testing (integration version)
type mockSelectQuery struct {
operations []string
}
func (m *mockSelectQuery) Model(model interface{}) common.SelectQuery {
m.operations = append(m.operations, "Model")
return m
}
func (m *mockSelectQuery) Table(table string) common.SelectQuery {
m.operations = append(m.operations, "Table:"+table)
return m
}
func (m *mockSelectQuery) Column(columns ...string) common.SelectQuery {
for _, col := range columns {
m.operations = append(m.operations, "Column:"+col)
}
return m
}
func (m *mockSelectQuery) ColumnExpr(query string, args ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "ColumnExpr:"+query)
return m
}
func (m *mockSelectQuery) Where(query string, args ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "Where:"+query)
return m
}
func (m *mockSelectQuery) WhereOr(query string, args ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "WhereOr:"+query)
return m
}
func (m *mockSelectQuery) WhereIn(column string, values interface{}) common.SelectQuery {
m.operations = append(m.operations, "WhereIn:"+column)
return m
}
func (m *mockSelectQuery) Order(order string) common.SelectQuery {
m.operations = append(m.operations, "Order:"+order)
return m
}
func (m *mockSelectQuery) OrderExpr(order string, args ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "OrderExpr:"+order)
return m
}
func (m *mockSelectQuery) Limit(limit int) common.SelectQuery {
m.operations = append(m.operations, "Limit")
return m
}
func (m *mockSelectQuery) Offset(offset int) common.SelectQuery {
m.operations = append(m.operations, "Offset")
return m
}
func (m *mockSelectQuery) Join(join string, args ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "Join:"+join)
return m
}
func (m *mockSelectQuery) LeftJoin(join string, args ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "LeftJoin:"+join)
return m
}
func (m *mockSelectQuery) Group(columns string) common.SelectQuery {
m.operations = append(m.operations, "Group")
return m
}
func (m *mockSelectQuery) Having(query string, args ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "Having:"+query)
return m
}
func (m *mockSelectQuery) Preload(relation string, conditions ...interface{}) common.SelectQuery {
m.operations = append(m.operations, "Preload:"+relation)
return m
}
func (m *mockSelectQuery) PreloadRelation(relation string, apply ...func(common.SelectQuery) common.SelectQuery) common.SelectQuery {
m.operations = append(m.operations, "PreloadRelation:"+relation)
// Apply the preload modifiers
for _, fn := range apply {
fn(m)
}
return m
}
func (m *mockSelectQuery) JoinRelation(relation string, apply ...func(common.SelectQuery) common.SelectQuery) common.SelectQuery {
m.operations = append(m.operations, "JoinRelation:"+relation)
return m
}
func (m *mockSelectQuery) Scan(ctx context.Context, dest interface{}) error {
m.operations = append(m.operations, "Scan")
return nil
}
func (m *mockSelectQuery) ScanModel(ctx context.Context) error {
m.operations = append(m.operations, "ScanModel")
return nil
}
func (m *mockSelectQuery) Count(ctx context.Context) (int, error) {
m.operations = append(m.operations, "Count")
return 0, nil
}
func (m *mockSelectQuery) Exists(ctx context.Context) (bool, error) {
m.operations = append(m.operations, "Exists")
return false, nil
}
func (m *mockSelectQuery) GetUnderlyingQuery() interface{} {
return nil
}
func (m *mockSelectQuery) GetModel() interface{} {
return nil
}
// TestXFilesRecursivePreload is an integration test that validates the XFiles
// recursive preload functionality using real test data files.
//
// This test ensures:
// 1. XFiles request JSON is correctly parsed into PreloadOptions
// 2. Recursive preload generates correct FK-based relation names (MAL_RID_PARENTMASTERTASKITEM)
// 3. Parent WHERE clauses don't leak to child levels
// 4. Child relations (like DEF) are extended to all recursive levels
// 5. Hierarchical data structure matches expected output
func TestXFilesRecursivePreload(t *testing.T) {
// Load the XFiles request configuration
requestPath := filepath.Join("..", "..", "tests", "data", "xfiles.request.json")
requestData, err := os.ReadFile(requestPath)
require.NoError(t, err, "Failed to read xfiles.request.json")
var xfileConfig XFiles
err = json.Unmarshal(requestData, &xfileConfig)
require.NoError(t, err, "Failed to parse xfiles.request.json")
// Create handler and parse XFiles into PreloadOptions
handler := &Handler{}
options := &ExtendedRequestOptions{
RequestOptions: common.RequestOptions{
Preload: []common.PreloadOption{},
},
}
// Process the XFiles configuration - start with the root table
handler.processXFilesRelations(&xfileConfig, options, "")
// Verify that preload options were created
require.NotEmpty(t, options.Preload, "Expected preload options to be created")
// Test 1: Verify mastertaskitem preload is marked as recursive with correct RelatedKey
t.Run("RecursivePreloadHasRelatedKey", func(t *testing.T) {
// Find the mastertaskitem preload - it should be marked as recursive
var recursivePreload *common.PreloadOption
for i := range options.Preload {
preload := &options.Preload[i]
if preload.Relation == "MTL.MAL" && preload.Recursive {
recursivePreload = preload
break
}
}
require.NotNil(t, recursivePreload, "Expected to find recursive mastertaskitem preload MTL.MAL")
// RelatedKey should be the parent relationship key (MTL -> MAL)
assert.Equal(t, "rid_mastertask", recursivePreload.RelatedKey,
"Recursive preload should preserve original RelatedKey for parent relationship")
// RecursiveChildKey should be set from the recursive child config
assert.Equal(t, "rid_parentmastertaskitem", recursivePreload.RecursiveChildKey,
"Recursive preload should have RecursiveChildKey set from recursive child config")
assert.True(t, recursivePreload.Recursive, "mastertaskitem preload should be marked as recursive")
})
// Test 2: Verify mastertaskitem has WHERE clause for filtering root items
t.Run("RootLevelHasWhereClause", func(t *testing.T) {
var rootPreload *common.PreloadOption
for i := range options.Preload {
preload := &options.Preload[i]
if preload.Relation == "MTL.MAL" {
rootPreload = preload
break
}
}
require.NotNil(t, rootPreload, "Expected to find mastertaskitem preload")
assert.NotEmpty(t, rootPreload.Where, "Mastertaskitem should have WHERE clause")
// The WHERE clause should filter for root items (rid_parentmastertaskitem is null)
assert.True(t, rootPreload.Recursive, "Mastertaskitem preload should be marked as recursive")
})
// Test 3: Verify actiondefinition relation exists for mastertaskitem
t.Run("DEFRelationExists", func(t *testing.T) {
var defPreload *common.PreloadOption
for i := range options.Preload {
preload := &options.Preload[i]
if preload.Relation == "MTL.MAL.DEF" {
defPreload = preload
break
}
}
require.NotNil(t, defPreload, "Expected to find actiondefinition preload for mastertaskitem")
assert.Equal(t, "rid_actiondefinition", defPreload.ForeignKey,
"actiondefinition preload should have ForeignKey set")
})
// Test 4: Verify relation name generation with mock query
t.Run("RelationNameGeneration", func(t *testing.T) {
// Find the mastertaskitem preload - it should be marked as recursive
var recursivePreload common.PreloadOption
found := false
for _, preload := range options.Preload {
if preload.Relation == "MTL.MAL" && preload.Recursive {
recursivePreload = preload
found = true
break
}
}
require.True(t, found, "Expected to find recursive mastertaskitem preload MTL.MAL")
// Create mock query to track operations
mockQuery := &mockSelectQuery{operations: []string{}}
// Apply the recursive preload
result := handler.applyPreloadWithRecursion(mockQuery, recursivePreload, options.Preload, nil, 0)
mock := result.(*mockSelectQuery)
// Verify the correct FK-based relation name was generated
foundCorrectRelation := false
for _, op := range mock.operations {
// Should generate: MTL.MAL.MAL_RID_PARENTMASTERTASKITEM
if op == "PreloadRelation:MTL.MAL.MAL_RID_PARENTMASTERTASKITEM" {
foundCorrectRelation = true
}
}
assert.True(t, foundCorrectRelation,
"Expected FK-based relation name 'MTL.MAL.MAL_RID_PARENTMASTERTASKITEM' to be generated. Operations: %v",
mock.operations)
})
// Test 5: Verify WHERE clause is cleared for recursive levels
t.Run("WhereClauseClearedForChildren", func(t *testing.T) {
// Find the mastertaskitem preload - it should be marked as recursive
var recursivePreload common.PreloadOption
found := false
for _, preload := range options.Preload {
if preload.Relation == "MTL.MAL" && preload.Recursive {
recursivePreload = preload
found = true
break
}
}
require.True(t, found, "Expected to find recursive mastertaskitem preload MTL.MAL")
// The root level has a WHERE clause (rid_parentmastertaskitem is null)
// But when we apply recursion, it should be cleared
assert.NotEmpty(t, recursivePreload.Where, "Root preload should have WHERE clause")
mockQuery := &mockSelectQuery{operations: []string{}}
result := handler.applyPreloadWithRecursion(mockQuery, recursivePreload, options.Preload, nil, 0)
mock := result.(*mockSelectQuery)
// After the first level, WHERE clauses should not be reapplied
// We check that the recursive relation was created (which means WHERE was cleared internally)
foundRecursiveRelation := false
for _, op := range mock.operations {
if op == "PreloadRelation:MTL.MAL.MAL_RID_PARENTMASTERTASKITEM" {
foundRecursiveRelation = true
}
}
assert.True(t, foundRecursiveRelation,
"Recursive relation should be created (WHERE clause should be cleared internally)")
})
// Test 6: Verify child relations are extended to recursive levels
t.Run("ChildRelationsExtended", func(t *testing.T) {
// Find the mastertaskitem preload - it should be marked as recursive
var recursivePreload common.PreloadOption
foundRecursive := false
for _, preload := range options.Preload {
if preload.Relation == "MTL.MAL" && preload.Recursive {
recursivePreload = preload
foundRecursive = true
break
}
}
require.True(t, foundRecursive, "Expected to find recursive mastertaskitem preload MTL.MAL")
mockQuery := &mockSelectQuery{operations: []string{}}
result := handler.applyPreloadWithRecursion(mockQuery, recursivePreload, options.Preload, nil, 0)
mock := result.(*mockSelectQuery)
// actiondefinition should be extended to the recursive level
// Expected: MTL.MAL.MAL_RID_PARENTMASTERTASKITEM.DEF
foundExtendedDEF := false
for _, op := range mock.operations {
if op == "PreloadRelation:MTL.MAL.MAL_RID_PARENTMASTERTASKITEM.DEF" {
foundExtendedDEF = true
}
}
assert.True(t, foundExtendedDEF,
"Expected actiondefinition relation to be extended to recursive level. Operations: %v",
mock.operations)
})
}
// TestXFilesRecursivePreloadDepth tests that recursive preloads respect the depth limit of 8
func TestXFilesRecursivePreloadDepth(t *testing.T) {
handler := &Handler{}
preload := common.PreloadOption{
Relation: "MAL",
Recursive: true,
RelatedKey: "rid_parentmastertaskitem",
}
allPreloads := []common.PreloadOption{preload}
t.Run("Depth7CreatesLevel8", func(t *testing.T) {
mockQuery := &mockSelectQuery{operations: []string{}}
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 7)
mock := result.(*mockSelectQuery)
foundDepth8 := false
for _, op := range mock.operations {
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
foundDepth8 = true
}
}
assert.True(t, foundDepth8, "Should create level 8 when starting at depth 7")
})
t.Run("Depth8DoesNotCreateLevel9", func(t *testing.T) {
mockQuery := &mockSelectQuery{operations: []string{}}
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 8)
mock := result.(*mockSelectQuery)
foundDepth9 := false
for _, op := range mock.operations {
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
foundDepth9 = true
}
}
assert.False(t, foundDepth9, "Should NOT create level 9 (depth limit is 8)")
})
}
// TestXFilesResponseStructure validates the actual structure of the response
// This test can be expanded when we have a full database integration test environment
func TestXFilesResponseStructure(t *testing.T) {
// Load the expected correct response
correctResponsePath := filepath.Join("..", "..", "tests", "data", "xfiles.response.correct.json")
correctData, err := os.ReadFile(correctResponsePath)
require.NoError(t, err, "Failed to read xfiles.response.correct.json")
var correctResponse []map[string]interface{}
err = json.Unmarshal(correctData, &correctResponse)
require.NoError(t, err, "Failed to parse xfiles.response.correct.json")
// Test 1: Verify root level has exactly 1 masterprocess
t.Run("RootLevelHasOneItem", func(t *testing.T) {
assert.Len(t, correctResponse, 1, "Root level should have exactly 1 masterprocess record")
})
// Test 2: Verify the root item has MTL relation
t.Run("RootHasMTLRelation", func(t *testing.T) {
require.NotEmpty(t, correctResponse, "Response should not be empty")
rootItem := correctResponse[0]
mtl, exists := rootItem["MTL"]
assert.True(t, exists, "Root item should have MTL relation")
assert.NotNil(t, mtl, "MTL relation should not be null")
})
// Test 3: Verify MTL has MAL items
t.Run("MTLHasMALItems", func(t *testing.T) {
require.NotEmpty(t, correctResponse, "Response should not be empty")
rootItem := correctResponse[0]
mtl, ok := rootItem["MTL"].([]interface{})
require.True(t, ok, "MTL should be an array")
require.NotEmpty(t, mtl, "MTL should have items")
firstMTL, ok := mtl[0].(map[string]interface{})
require.True(t, ok, "MTL item should be a map")
mal, exists := firstMTL["MAL"]
assert.True(t, exists, "MTL item should have MAL relation")
assert.NotNil(t, mal, "MAL relation should not be null")
})
// Test 4: Verify MAL items have MAL_RID_PARENTMASTERTASKITEM relation (recursive)
t.Run("MALHasRecursiveRelation", func(t *testing.T) {
require.NotEmpty(t, correctResponse, "Response should not be empty")
rootItem := correctResponse[0]
mtl, ok := rootItem["MTL"].([]interface{})
require.True(t, ok, "MTL should be an array")
require.NotEmpty(t, mtl, "MTL should have items")
firstMTL, ok := mtl[0].(map[string]interface{})
require.True(t, ok, "MTL item should be a map")
mal, ok := firstMTL["MAL"].([]interface{})
require.True(t, ok, "MAL should be an array")
require.NotEmpty(t, mal, "MAL should have items")
firstMAL, ok := mal[0].(map[string]interface{})
require.True(t, ok, "MAL item should be a map")
// The key assertion: check for FK-based relation name
recursiveRelation, exists := firstMAL["MAL_RID_PARENTMASTERTASKITEM"]
assert.True(t, exists,
"MAL item should have MAL_RID_PARENTMASTERTASKITEM relation (FK-based name)")
// It can be null or an array, depending on whether this item has children
if recursiveRelation != nil {
_, isArray := recursiveRelation.([]interface{})
assert.True(t, isArray,
"MAL_RID_PARENTMASTERTASKITEM should be an array when not null")
}
})
// Test 5: Verify "Receive COB Document for" appears as a child, not at root
t.Run("ChildItemsAreNested", func(t *testing.T) {
// This test verifies that "Receive COB Document for" doesn't appear
// multiple times at the wrong level, but is properly nested
// Count how many times we find this description at the MAL level (should be 0 or 1)
require.NotEmpty(t, correctResponse, "Response should not be empty")
rootItem := correctResponse[0]
mtl, ok := rootItem["MTL"].([]interface{})
require.True(t, ok, "MTL should be an array")
require.NotEmpty(t, mtl, "MTL should have items")
firstMTL, ok := mtl[0].(map[string]interface{})
require.True(t, ok, "MTL item should be a map")
mal, ok := firstMTL["MAL"].([]interface{})
require.True(t, ok, "MAL should be an array")
// Count root-level MAL items (before the fix, there were 12; should be 1)
assert.Len(t, mal, 1,
"MAL should have exactly 1 root-level item (before fix: 12 duplicates)")
// Verify the root item has a description
firstMAL, ok := mal[0].(map[string]interface{})
require.True(t, ok, "MAL item should be a map")
description, exists := firstMAL["description"]
assert.True(t, exists, "MAL item should have a description")
assert.Equal(t, "Capture COB Information", description,
"Root MAL item should be 'Capture COB Information'")
})
// Test 6: Verify DEF relation exists at MAL level
t.Run("DEFRelationExists", func(t *testing.T) {
require.NotEmpty(t, correctResponse, "Response should not be empty")
rootItem := correctResponse[0]
mtl, ok := rootItem["MTL"].([]interface{})
require.True(t, ok, "MTL should be an array")
require.NotEmpty(t, mtl, "MTL should have items")
firstMTL, ok := mtl[0].(map[string]interface{})
require.True(t, ok, "MTL item should be a map")
mal, ok := firstMTL["MAL"].([]interface{})
require.True(t, ok, "MAL should be an array")
require.NotEmpty(t, mal, "MAL should have items")
firstMAL, ok := mal[0].(map[string]interface{})
require.True(t, ok, "MAL item should be a map")
// Verify DEF relation exists (child relation extension)
def, exists := firstMAL["DEF"]
assert.True(t, exists, "MAL item should have DEF relation")
// DEF can be null or an object
if def != nil {
_, isMap := def.(map[string]interface{})
assert.True(t, isMap, "DEF should be an object when not null")
}
})
}
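Taken together, the six subtests above pin down the nested shape the recursive preload is expected to produce. A minimal sketch of that shape as a Go literal follows; the only value taken from the tests is the "Capture COB Information" description, and everything else is an illustrative placeholder rather than fixture data.

// Illustrative sketch of the nesting asserted above; values other than the
// "Capture COB Information" description are placeholders, not fixture data.
var expectedShape = map[string]interface{}{
	"MTL": []interface{}{
		map[string]interface{}{
			"MAL": []interface{}{
				map[string]interface{}{
					"description": "Capture COB Information",
					"DEF":         map[string]interface{}{}, // child relation, may also be null
					"MAL_RID_PARENTMASTERTASKITEM": []interface{}{
						// recursive children nested here instead of duplicated at the MAL root
					},
				},
			},
		},
	},
}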

View File

@@ -411,7 +411,9 @@ func newInstance(cfg Config) (*serverInstance, error) {
 		return nil, fmt.Errorf("handler cannot be nil")
 	}
-	// Set default timeouts
+	// Set default timeouts with minimum of 10 minutes for connection timeouts
+	minConnectionTimeout := 10 * time.Minute
 	if cfg.ShutdownTimeout == 0 {
 		cfg.ShutdownTimeout = 30 * time.Second
 	}
@@ -419,13 +421,22 @@ func newInstance(cfg Config) (*serverInstance, error) {
 		cfg.DrainTimeout = 25 * time.Second
 	}
 	if cfg.ReadTimeout == 0 {
-		cfg.ReadTimeout = 15 * time.Second
+		cfg.ReadTimeout = minConnectionTimeout
+	} else if cfg.ReadTimeout < minConnectionTimeout {
+		// Enforce minimum of 10 minutes
+		cfg.ReadTimeout = minConnectionTimeout
 	}
 	if cfg.WriteTimeout == 0 {
-		cfg.WriteTimeout = 15 * time.Second
+		cfg.WriteTimeout = minConnectionTimeout
+	} else if cfg.WriteTimeout < minConnectionTimeout {
+		// Enforce minimum of 10 minutes
+		cfg.WriteTimeout = minConnectionTimeout
 	}
 	if cfg.IdleTimeout == 0 {
-		cfg.IdleTimeout = 60 * time.Second
+		cfg.IdleTimeout = minConnectionTimeout
+	} else if cfg.IdleTimeout < minConnectionTimeout {
+		// Enforce minimum of 10 minutes
+		cfg.IdleTimeout = minConnectionTimeout
 	}
 	addr := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port)
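The net effect of this hunk is that read, write, and idle timeouts below ten minutes are raised to ten minutes, and zero values now default to ten minutes rather than the old 15/60-second defaults. A minimal standalone sketch of that clamping behaviour (the helper name is illustrative, not part of the change):

package main

import (
	"fmt"
	"time"
)

// clampTimeout mirrors the behaviour introduced above: zero or below-minimum
// durations are raised to the minimum. (Helper name is illustrative only.)
func clampTimeout(d, minimum time.Duration) time.Duration {
	if d == 0 || d < minimum {
		return minimum
	}
	return d
}

func main() {
	minimum := 10 * time.Minute
	fmt.Println(clampTimeout(15*time.Second, minimum)) // 10m0s (raised to the minimum)
	fmt.Println(clampTimeout(0, minimum))              // 10m0s (zero gets the default)
	fmt.Println(clampTimeout(20*time.Minute, minimum)) // 20m0s (already above, kept)
}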

View File

@@ -4,6 +4,7 @@ package spectypes
 import (
 	"database/sql"
 	"database/sql/driver"
+	"encoding/base64"
 	"encoding/json"
 	"fmt"
 	"reflect"
@@ -60,7 +61,33 @@ func (n *SqlNull[T]) Scan(value any) error {
 		return nil
 	}
-	// Try standard sql.Null[T] first.
+	// Check if T is []byte, and decode base64 if applicable
+	// Do this BEFORE trying sql.Null to ensure base64 is handled
+	var zero T
+	if _, ok := any(zero).([]byte); ok {
+		// For []byte types, try to decode from base64
+		var strVal string
+		switch v := value.(type) {
+		case string:
+			strVal = v
+		case []byte:
+			strVal = string(v)
+		default:
+			strVal = fmt.Sprintf("%v", value)
+		}
+		// Try base64 decode
+		if decoded, err := base64.StdEncoding.DecodeString(strVal); err == nil {
+			n.Val = any(decoded).(T)
+			n.Valid = true
+			return nil
+		}
+		// Fallback to raw bytes
+		n.Val = any([]byte(strVal)).(T)
+		n.Valid = true
+		return nil
+	}
+	// Try standard sql.Null[T] for other types.
 	var sqlNull sql.Null[T]
 	if err := sqlNull.Scan(value); err == nil {
 		n.Val = sqlNull.V
@@ -122,6 +149,9 @@ func (n *SqlNull[T]) FromString(s string) error {
 		n.Val = any(u).(T)
 		n.Valid = true
 	}
+	case []byte:
+		n.Val = any([]byte(s)).(T)
+		n.Valid = true
 	case string:
 		n.Val = any(s).(T)
 		n.Valid = true
@@ -149,6 +179,14 @@ func (n SqlNull[T]) MarshalJSON() ([]byte, error) {
 	if !n.Valid {
 		return []byte("null"), nil
 	}
+	// Check if T is []byte, and encode to base64
+	if _, ok := any(n.Val).([]byte); ok {
+		// Encode []byte as base64
+		encoded := base64.StdEncoding.EncodeToString(any(n.Val).([]byte))
+		return json.Marshal(encoded)
+	}
 	return json.Marshal(n.Val)
 }
@@ -160,8 +198,25 @@ func (n *SqlNull[T]) UnmarshalJSON(b []byte) error {
 		return nil
 	}
-	// Try direct unmarshal.
+	// Check if T is []byte, and decode from base64
 	var val T
+	if _, ok := any(val).([]byte); ok {
+		// Unmarshal as string first (JSON representation)
+		var s string
+		if err := json.Unmarshal(b, &s); err == nil {
+			// Decode from base64
+			if decoded, err := base64.StdEncoding.DecodeString(s); err == nil {
+				n.Val = any(decoded).(T)
+				n.Valid = true
+				return nil
+			}
+			// Fallback to raw string as bytes
+			n.Val = any([]byte(s)).(T)
+			n.Valid = true
+			return nil
+		}
+	}
 	if err := json.Unmarshal(b, &val); err == nil {
 		n.Val = val
 		n.Valid = true
@@ -271,13 +326,14 @@ func (n SqlNull[T]) UUID() uuid.UUID {
 // Type aliases for common types.
 type (
 	SqlInt16     = SqlNull[int16]
 	SqlInt32     = SqlNull[int32]
 	SqlInt64     = SqlNull[int64]
 	SqlFloat64   = SqlNull[float64]
 	SqlBool      = SqlNull[bool]
 	SqlString    = SqlNull[string]
+	SqlByteArray = SqlNull[[]byte]
 	SqlUUID      = SqlNull[uuid.UUID]
 )
 // SqlTimeStamp - Timestamp with custom formatting (YYYY-MM-DDTHH:MM:SS).
@@ -581,6 +637,10 @@ func NewSqlString(v string) SqlString {
 	return SqlString{Val: v, Valid: true}
 }
+
+func NewSqlByteArray(v []byte) SqlByteArray {
+	return SqlByteArray{Val: v, Valid: true}
+}
 func NewSqlUUID(v uuid.UUID) SqlUUID {
 	return SqlUUID{Val: v, Valid: true}
 }

View File

@@ -565,3 +565,394 @@ func TestTryIfInt64(t *testing.T) {
	})
}
}
// TestSqlString_Scan tests scanning SqlString values as plain text (no base64 handling)
func TestSqlString_Scan(t *testing.T) {
tests := []struct {
name string
input interface{}
expected string
valid bool
}{
{
name: "plain string",
input: "hello world",
expected: "hello world",
valid: true,
},
{
name: "plain text",
input: "plain text",
expected: "plain text",
valid: true,
},
{
name: "bytes as string",
input: []byte("raw bytes"),
expected: "raw bytes",
valid: true,
},
{
name: "nil value",
input: nil,
expected: "",
valid: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var s SqlString
if err := s.Scan(tt.input); err != nil {
t.Fatalf("Scan failed: %v", err)
}
if s.Valid != tt.valid {
t.Errorf("expected valid=%v, got valid=%v", tt.valid, s.Valid)
}
if tt.valid && s.String() != tt.expected {
t.Errorf("expected %q, got %q", tt.expected, s.String())
}
})
}
}
func TestSqlString_JSON(t *testing.T) {
tests := []struct {
name string
inputValue string
expectedJSON string
expectedDecode string
}{
{
name: "simple string",
inputValue: "hello world",
expectedJSON: `"hello world"`, // plain text, not base64
expectedDecode: "hello world",
},
{
name: "special characters",
inputValue: "test@#$%",
expectedJSON: `"test@#$%"`, // plain text, not base64
expectedDecode: "test@#$%",
},
{
name: "unicode string",
inputValue: "Hello 世界",
expectedJSON: `"Hello 世界"`, // plain text, not base64
expectedDecode: "Hello 世界",
},
{
name: "empty string",
inputValue: "",
expectedJSON: `""`,
expectedDecode: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Test MarshalJSON
s := NewSqlString(tt.inputValue)
data, err := json.Marshal(s)
if err != nil {
t.Fatalf("Marshal failed: %v", err)
}
if string(data) != tt.expectedJSON {
t.Errorf("Marshal: expected %s, got %s", tt.expectedJSON, string(data))
}
// Test UnmarshalJSON
var s2 SqlString
if err := json.Unmarshal(data, &s2); err != nil {
t.Fatalf("Unmarshal failed: %v", err)
}
if !s2.Valid {
t.Error("expected valid=true after unmarshal")
}
if s2.String() != tt.expectedDecode {
t.Errorf("Unmarshal: expected %q, got %q", tt.expectedDecode, s2.String())
}
})
}
}
func TestSqlString_JSON_Null(t *testing.T) {
// Test null handling
var s SqlString
if err := json.Unmarshal([]byte("null"), &s); err != nil {
t.Fatalf("Unmarshal null failed: %v", err)
}
if s.Valid {
t.Error("expected invalid after unmarshaling null")
}
// Test marshal null
data, err := json.Marshal(s)
if err != nil {
t.Fatalf("Marshal failed: %v", err)
}
if string(data) != "null" {
t.Errorf("expected null, got %s", string(data))
}
}
// TestSqlByteArray_Base64_Scan tests scanning SqlByteArray values with base64 decoding
func TestSqlByteArray_Base64_Scan(t *testing.T) {
tests := []struct {
name string
input interface{}
expected []byte
valid bool
}{
{
name: "base64 encoded bytes from SQL",
input: "aGVsbG8gd29ybGQ=", // "hello world" in base64
expected: []byte("hello world"),
valid: true,
},
{
name: "plain bytes fallback",
input: "plain text",
expected: []byte("plain text"),
valid: true,
},
{
name: "bytes base64 encoded",
input: []byte("SGVsbG8gR29waGVy"), // "Hello Gopher" in base64
expected: []byte("Hello Gopher"),
valid: true,
},
{
name: "bytes plain fallback",
input: []byte("raw bytes"),
expected: []byte("raw bytes"),
valid: true,
},
{
name: "binary data",
input: "AQIDBA==", // []byte{1, 2, 3, 4} in base64
expected: []byte{1, 2, 3, 4},
valid: true,
},
{
name: "nil value",
input: nil,
expected: nil,
valid: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var b SqlByteArray
if err := b.Scan(tt.input); err != nil {
t.Fatalf("Scan failed: %v", err)
}
if b.Valid != tt.valid {
t.Errorf("expected valid=%v, got valid=%v", tt.valid, b.Valid)
}
if tt.valid {
if string(b.Val) != string(tt.expected) {
t.Errorf("expected %q, got %q", tt.expected, b.Val)
}
}
})
}
}
func TestSqlByteArray_Base64_JSON(t *testing.T) {
tests := []struct {
name string
inputValue []byte
expectedJSON string
expectedDecode []byte
}{
{
name: "text bytes",
inputValue: []byte("hello world"),
expectedJSON: `"aGVsbG8gd29ybGQ="`, // base64 encoded
expectedDecode: []byte("hello world"),
},
{
name: "binary data",
inputValue: []byte{0x01, 0x02, 0x03, 0x04, 0xFF},
expectedJSON: `"AQIDBP8="`, // base64 encoded
expectedDecode: []byte{0x01, 0x02, 0x03, 0x04, 0xFF},
},
{
name: "empty bytes",
inputValue: []byte{},
expectedJSON: `""`, // base64 of empty bytes
expectedDecode: []byte{},
},
{
name: "unicode bytes",
inputValue: []byte("Hello 世界"),
expectedJSON: `"SGVsbG8g5LiW55WM"`, // base64 encoded
expectedDecode: []byte("Hello 世界"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Test MarshalJSON
b := NewSqlByteArray(tt.inputValue)
data, err := json.Marshal(b)
if err != nil {
t.Fatalf("Marshal failed: %v", err)
}
if string(data) != tt.expectedJSON {
t.Errorf("Marshal: expected %s, got %s", tt.expectedJSON, string(data))
}
// Test UnmarshalJSON
var b2 SqlByteArray
if err := json.Unmarshal(data, &b2); err != nil {
t.Fatalf("Unmarshal failed: %v", err)
}
if !b2.Valid {
t.Error("expected valid=true after unmarshal")
}
if string(b2.Val) != string(tt.expectedDecode) {
t.Errorf("Unmarshal: expected %v, got %v", tt.expectedDecode, b2.Val)
}
})
}
}
func TestSqlByteArray_Base64_JSON_Null(t *testing.T) {
// Test null handling
var b SqlByteArray
if err := json.Unmarshal([]byte("null"), &b); err != nil {
t.Fatalf("Unmarshal null failed: %v", err)
}
if b.Valid {
t.Error("expected invalid after unmarshaling null")
}
// Test marshal null
data, err := json.Marshal(b)
if err != nil {
t.Fatalf("Marshal failed: %v", err)
}
if string(data) != "null" {
t.Errorf("expected null, got %s", string(data))
}
}
func TestSqlByteArray_Value(t *testing.T) {
tests := []struct {
name string
input SqlByteArray
expected interface{}
}{
{
name: "valid bytes",
input: NewSqlByteArray([]byte("test data")),
expected: []byte("test data"),
},
{
name: "empty bytes",
input: NewSqlByteArray([]byte{}),
expected: []byte{},
},
{
name: "invalid",
input: SqlByteArray{Valid: false},
expected: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
val, err := tt.input.Value()
if err != nil {
t.Fatalf("Value failed: %v", err)
}
if tt.expected == nil && val != nil {
t.Errorf("expected nil, got %v", val)
}
if tt.expected != nil && val == nil {
t.Errorf("expected %v, got nil", tt.expected)
}
if tt.expected != nil && val != nil {
if string(val.([]byte)) != string(tt.expected.([]byte)) {
t.Errorf("expected %v, got %v", tt.expected, val)
}
}
})
}
}
// TestSqlString_RoundTrip tests complete round-trip: Go -> JSON -> Go -> SQL -> Go
func TestSqlString_RoundTrip(t *testing.T) {
original := "Test String with Special Chars: @#$%^&*()"
// Go -> JSON
s1 := NewSqlString(original)
jsonData, err := json.Marshal(s1)
if err != nil {
t.Fatalf("Marshal failed: %v", err)
}
// JSON -> Go
var s2 SqlString
if err := json.Unmarshal(jsonData, &s2); err != nil {
t.Fatalf("Unmarshal failed: %v", err)
}
// Go -> SQL (Value)
_, err = s2.Value()
if err != nil {
t.Fatalf("Value failed: %v", err)
}
// SQL -> Go (Scan plain text)
var s3 SqlString
// Simulate SQL driver returning plain text value
if err := s3.Scan(original); err != nil {
t.Fatalf("Scan failed: %v", err)
}
// Verify round-trip
if s3.String() != original {
t.Errorf("Round-trip failed: expected %q, got %q", original, s3.String())
}
}
// TestSqlByteArray_Base64_RoundTrip tests complete round-trip: Go -> JSON -> Go -> SQL -> Go
func TestSqlByteArray_Base64_RoundTrip(t *testing.T) {
original := []byte{0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x20, 0xFF, 0xFE} // "Hello " + binary data
// Go -> JSON
b1 := NewSqlByteArray(original)
jsonData, err := json.Marshal(b1)
if err != nil {
t.Fatalf("Marshal failed: %v", err)
}
// JSON -> Go
var b2 SqlByteArray
if err := json.Unmarshal(jsonData, &b2); err != nil {
t.Fatalf("Unmarshal failed: %v", err)
}
// Go -> SQL (Value)
_, err = b2.Value()
if err != nil {
t.Fatalf("Value failed: %v", err)
}
// SQL -> Go (Scan with base64)
var b3 SqlByteArray
// Simulate SQL driver returning base64 encoded value
if err := b3.Scan("SGVsbG8g//4="); err != nil {
t.Fatalf("Scan failed: %v", err)
}
// Verify round-trip
if string(b3.Val) != string(original) {
t.Errorf("Round-trip failed: expected %v, got %v", original, b3.Val)
}
}