mirror of https://github.com/bitechdev/ResolveSpec.git (synced 2026-01-30 22:44:25 +00:00)
Compare commits
8 Commits
fc8f44e3e8
584bb9813d
17239d1611
defe27549b
f7725340a6
07016d1b73
09f2256899
c12c045db1
.gitignore (vendored): 1 line changed
@@ -26,3 +26,4 @@ go.work.sum
 bin/
 test.db
 /testserver
+tests/data/
@@ -130,6 +130,9 @@ func validateWhereClauseSecurity(where string) error {
 // Note: This function will NOT add prefixes to unprefixed columns. It will only fix
 // incorrect prefixes (e.g., wrong_table.column -> correct_table.column), unless the
 // prefix matches a preloaded relation name, in which case it's left unchanged.
+//
+// IMPORTANT: Outer parentheses are preserved if the clause contains top-level OR operators
+// to prevent OR logic from escaping and affecting the entire query incorrectly.
 func SanitizeWhereClause(where string, tableName string, options ...*RequestOptions) string {
 	if where == "" {
 		return ""
@@ -143,8 +146,19 @@ func SanitizeWhereClause(where string, tableName string, options ...*RequestOptions) string {
 		return ""
 	}
 
-	// Strip outer parentheses and re-trim
-	where = stripOuterParentheses(where)
+	// Check if the original clause has outer parentheses and contains OR operators
+	// If so, we need to preserve the outer parentheses to prevent OR logic from escaping
+	hasOuterParens := false
+	if len(where) > 0 && where[0] == '(' && where[len(where)-1] == ')' {
+		_, hasOuterParens = stripOneMatchingOuterParen(where)
+	}
+
+	// Strip outer parentheses and re-trim for processing
+	whereWithoutParens := stripOuterParentheses(where)
+	shouldPreserveParens := hasOuterParens && containsTopLevelOR(whereWithoutParens)
+
+	// Use the stripped version for processing
+	where = whereWithoutParens
 
 	// Get valid columns from the model if tableName is provided
 	var validColumns map[string]bool
@@ -229,7 +243,14 @@ func SanitizeWhereClause(where string, tableName string, options ...*RequestOptions) string {
 
 	result := strings.Join(validConditions, " AND ")
 
-	if result != where {
+	// If the original clause had outer parentheses and contains OR operators,
+	// restore the outer parentheses to prevent OR logic from escaping
+	if shouldPreserveParens {
+		result = "(" + result + ")"
+		logger.Debug("Preserved outer parentheses for OR conditions: '%s'", result)
+	}
+
+	if result != where && !shouldPreserveParens {
 		logger.Debug("Sanitized WHERE clause: '%s' -> '%s'", where, result)
 	}
 
@@ -290,6 +311,93 @@ func stripOneMatchingOuterParen(s string) (string, bool) {
	return strings.TrimSpace(s[1 : len(s)-1]), true
}

// EnsureOuterParentheses ensures that a SQL clause is wrapped in parentheses
// to prevent OR logic from escaping. It checks if the clause already has
// matching outer parentheses and only adds them if they don't exist.
//
// This is particularly important for OR conditions and complex filters where
// the absence of parentheses could cause the logic to escape and affect
// the entire query incorrectly.
//
// Parameters:
//   - clause: The SQL clause to check and potentially wrap
//
// Returns:
//   - The clause with guaranteed outer parentheses, or empty string if input is empty
func EnsureOuterParentheses(clause string) string {
	if clause == "" {
		return ""
	}

	clause = strings.TrimSpace(clause)
	if clause == "" {
		return ""
	}

	// Check if the clause already has matching outer parentheses
	_, hasOuterParens := stripOneMatchingOuterParen(clause)

	// If it already has matching outer parentheses, return as-is
	if hasOuterParens {
		return clause
	}

	// Otherwise, wrap it in parentheses
	return "(" + clause + ")"
}
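A minimal usage sketch (the expected outputs mirror the TestEnsureOuterParentheses cases later in this diff; the main wrapper is illustrative only):

package main

import (
	"fmt"

	"github.com/bitechdev/ResolveSpec/pkg/common"
)

func main() {
	fmt.Println(common.EnsureOuterParentheses("status = 'active'"))
	// (status = 'active')
	fmt.Println(common.EnsureOuterParentheses("(status = 'active' OR status = 'pending')"))
	// (status = 'active' OR status = 'pending')   -- already wrapped, returned unchanged
	fmt.Println(common.EnsureOuterParentheses("(status = 'active' OR status = 'pending') AND (age > 18)"))
	// ((status = 'active' OR status = 'pending') AND (age > 18))   -- outer parens don't match, so a new pair is added
}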
// containsTopLevelOR checks if a SQL clause contains OR operators at the top level
// (i.e., not inside parentheses or subqueries). This is used to determine if
// outer parentheses should be preserved to prevent OR logic from escaping.
func containsTopLevelOR(clause string) bool {
	if clause == "" {
		return false
	}

	depth := 0
	inSingleQuote := false
	inDoubleQuote := false
	lowerClause := strings.ToLower(clause)

	for i := 0; i < len(clause); i++ {
		ch := clause[i]

		// Track quote state
		if ch == '\'' && !inDoubleQuote {
			inSingleQuote = !inSingleQuote
			continue
		}
		if ch == '"' && !inSingleQuote {
			inDoubleQuote = !inDoubleQuote
			continue
		}

		// Skip if inside quotes
		if inSingleQuote || inDoubleQuote {
			continue
		}

		// Track parenthesis depth
		switch ch {
		case '(':
			depth++
		case ')':
			depth--
		}

		// Only check for OR at depth 0 (not inside parentheses)
		if depth == 0 && i+4 <= len(clause) {
			// Check for " OR " (case-insensitive)
			substring := lowerClause[i : i+4]
			if substring == " or " {
				return true
			}
		}
	}

	return false
}

// splitByAND splits a WHERE clause by AND operators (case-insensitive)
// This is parenthesis-aware and won't split on AND operators inside subqueries
func splitByAND(where string) []string {
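To motivate the top-level-OR check, here is an in-package (pkg/common) sketch of the escape problem it guards against; the clause strings are illustrative, only EnsureOuterParentheses is from the package:

func exampleEscapingOR() (unsafe, safe string) {
	base := "users.tenant_id = 42"
	filter := "users.status = 'active' OR users.status = 'pending'"

	// Naive concatenation lets the OR escape the AND and widen the result set:
	//   users.tenant_id = 42 AND users.status = 'active' OR users.status = 'pending'
	unsafe = base + " AND " + filter

	// Wrapping the filter first keeps the OR scoped to its own group:
	//   users.tenant_id = 42 AND (users.status = 'active' OR users.status = 'pending')
	safe = base + " AND " + EnsureOuterParentheses(filter)
	return unsafe, safe
}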
@@ -659,6 +659,179 @@ func TestSanitizeWhereClauseWithModel(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureOuterParentheses(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "no parentheses",
|
||||
input: "status = 'active'",
|
||||
expected: "(status = 'active')",
|
||||
},
|
||||
{
|
||||
name: "already has outer parentheses",
|
||||
input: "(status = 'active')",
|
||||
expected: "(status = 'active')",
|
||||
},
|
||||
{
|
||||
name: "OR condition without parentheses",
|
||||
input: "status = 'active' OR status = 'pending'",
|
||||
expected: "(status = 'active' OR status = 'pending')",
|
||||
},
|
||||
{
|
||||
name: "OR condition with parentheses",
|
||||
input: "(status = 'active' OR status = 'pending')",
|
||||
expected: "(status = 'active' OR status = 'pending')",
|
||||
},
|
||||
{
|
||||
name: "complex condition with nested parentheses",
|
||||
input: "(status = 'active' OR status = 'pending') AND (age > 18)",
|
||||
expected: "((status = 'active' OR status = 'pending') AND (age > 18))",
|
||||
},
|
||||
{
|
||||
name: "empty string",
|
||||
input: "",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "whitespace only",
|
||||
input: " ",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "mismatched parentheses - adds outer ones",
|
||||
input: "(status = 'active' OR status = 'pending'",
|
||||
expected: "((status = 'active' OR status = 'pending')",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := EnsureOuterParentheses(tt.input)
|
||||
if result != tt.expected {
|
||||
t.Errorf("EnsureOuterParentheses(%q) = %q; want %q", tt.input, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainsTopLevelOR(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "no OR operator",
|
||||
input: "status = 'active' AND age > 18",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "top-level OR",
|
||||
input: "status = 'active' OR status = 'pending'",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "OR inside parentheses",
|
||||
input: "age > 18 AND (status = 'active' OR status = 'pending')",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "OR in subquery",
|
||||
input: "id IN (SELECT id FROM users WHERE status = 'active' OR status = 'pending')",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "OR inside quotes",
|
||||
input: "comment = 'this OR that'",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "mixed - top-level OR and nested OR",
|
||||
input: "name = 'test' OR (status = 'active' OR status = 'pending')",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "empty string",
|
||||
input: "",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "lowercase or",
|
||||
input: "status = 'active' or status = 'pending'",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "uppercase OR",
|
||||
input: "status = 'active' OR status = 'pending'",
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := containsTopLevelOR(tt.input)
|
||||
if result != tt.expected {
|
||||
t.Errorf("containsTopLevelOR(%q) = %v; want %v", tt.input, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitizeWhereClause_PreservesParenthesesWithOR(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
where string
|
||||
tableName string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "OR condition with outer parentheses - preserved",
|
||||
where: "(status = 'active' OR status = 'pending')",
|
||||
tableName: "users",
|
||||
expected: "(users.status = 'active' OR users.status = 'pending')",
|
||||
},
|
||||
{
|
||||
name: "AND condition with outer parentheses - stripped (no OR)",
|
||||
where: "(status = 'active' AND age > 18)",
|
||||
tableName: "users",
|
||||
expected: "users.status = 'active' AND users.age > 18",
|
||||
},
|
||||
{
|
||||
name: "complex OR with nested conditions",
|
||||
where: "((status = 'active' OR status = 'pending') AND age > 18)",
|
||||
tableName: "users",
|
||||
// Outer parens are stripped, but inner parens with OR are preserved
|
||||
expected: "(users.status = 'active' OR users.status = 'pending') AND users.age > 18",
|
||||
},
|
||||
{
|
||||
name: "OR without outer parentheses - no parentheses added by SanitizeWhereClause",
|
||||
where: "status = 'active' OR status = 'pending'",
|
||||
tableName: "users",
|
||||
expected: "users.status = 'active' OR users.status = 'pending'",
|
||||
},
|
||||
{
|
||||
name: "simple OR with parentheses - preserved",
|
||||
where: "(users.status = 'active' OR users.status = 'pending')",
|
||||
tableName: "users",
|
||||
// Already has correct prefixes, parentheses preserved
|
||||
expected: "(users.status = 'active' OR users.status = 'pending')",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
prefixedWhere := AddTablePrefixToColumns(tt.where, tt.tableName)
|
||||
result := SanitizeWhereClause(prefixedWhere, tt.tableName)
|
||||
if result != tt.expected {
|
||||
t.Errorf("SanitizeWhereClause(%q, %q) = %q; want %q", tt.where, tt.tableName, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddTablePrefixToColumns_ComplexConditions(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -52,6 +52,10 @@ type PreloadOption struct {
 	PrimaryKey  string `json:"primary_key"` // Primary key of the related table
 	RelatedKey  string `json:"related_key"` // For child tables: column in child that references parent
 	ForeignKey  string `json:"foreign_key"` // For parent tables: column in current table that references parent
+
+	// Custom SQL JOINs from XFiles - used when preload needs additional joins
+	SqlJoins    []string `json:"sql_joins"`    // Custom SQL JOIN clauses
+	JoinAliases []string `json:"join_aliases"` // Extracted table aliases from SqlJoins for validation
 }
 
 type FilterOption struct {
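A sketch (code fragment) of a preload carrying custom joins, as an XFiles definition might produce it; the join strings and aliases are taken from the restheadspec tests later in this diff, the relation name is hypothetical:

preload := common.PreloadOption{
	Relation:   "users",
	PrimaryKey: "id",
	SqlJoins: []string{
		"LEFT JOIN departments d ON d.id = users.department_id",
		"INNER JOIN roles r ON r.id = users.role_id",
	},
	// Aliases extracted from SqlJoins; they let filter/sort columns such as
	// "d.active" or "r.name" pass column validation.
	JoinAliases: []string{"d", "r"},
}
_ = preload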
@@ -237,15 +237,29 @@ func (v *ColumnValidator) FilterRequestOptions(options RequestOptions) RequestOptions {
 	for _, sort := range options.Sort {
 		if v.IsValidColumn(sort.Column) {
 			validSorts = append(validSorts, sort)
-		} else if strings.HasPrefix(sort.Column, "(") && strings.HasSuffix(sort.Column, ")") {
-			// Allow sort by expression/subquery, but validate for security
-			if IsSafeSortExpression(sort.Column) {
-				validSorts = append(validSorts, sort)
-			} else {
-				logger.Warn("Unsafe sort expression '%s' removed", sort.Column)
-			}
 		} else {
-			logger.Warn("Invalid column in sort '%s' removed", sort.Column)
+			foundJoin := false
+			for _, j := range options.JoinAliases {
+				if strings.Contains(sort.Column, j) {
+					foundJoin = true
+					break
+				}
+			}
+			if foundJoin {
+				validSorts = append(validSorts, sort)
+				continue
+			}
+			if strings.HasPrefix(sort.Column, "(") && strings.HasSuffix(sort.Column, ")") {
+				// Allow sort by expression/subquery, but validate for security
+				if IsSafeSortExpression(sort.Column) {
+					validSorts = append(validSorts, sort)
+				} else {
+					logger.Warn("Unsafe sort expression '%s' removed", sort.Column)
+				}
+
+			} else {
+				logger.Warn("Invalid column in sort '%s' removed", sort.Column)
+			}
 		}
 	}
 	filtered.Sort = validSorts
@@ -258,13 +272,29 @@ func (v *ColumnValidator) FilterRequestOptions(options RequestOptions) RequestOptions {
 		filteredPreload.Columns = v.FilterValidColumns(preload.Columns)
 		filteredPreload.OmitColumns = v.FilterValidColumns(preload.OmitColumns)
 
+		// Preserve SqlJoins and JoinAliases for preloads with custom joins
+		filteredPreload.SqlJoins = preload.SqlJoins
+		filteredPreload.JoinAliases = preload.JoinAliases
+
 		// Filter preload filters
 		validPreloadFilters := make([]FilterOption, 0, len(preload.Filters))
 		for _, filter := range preload.Filters {
 			if v.IsValidColumn(filter.Column) {
 				validPreloadFilters = append(validPreloadFilters, filter)
 			} else {
-				logger.Warn("Invalid column in preload '%s' filter '%s' removed", preload.Relation, filter.Column)
+				// Check if the filter column references a joined table alias
+				foundJoin := false
+				for _, alias := range preload.JoinAliases {
+					if strings.Contains(filter.Column, alias) {
+						foundJoin = true
+						break
+					}
+				}
+				if foundJoin {
+					validPreloadFilters = append(validPreloadFilters, filter)
+				} else {
+					logger.Warn("Invalid column in preload '%s' filter '%s' removed", preload.Relation, filter.Column)
+				}
 			}
 		}
 		filteredPreload.Filters = validPreloadFilters
@@ -291,6 +321,9 @@ func (v *ColumnValidator) FilterRequestOptions(options RequestOptions) RequestOptions {
 	}
 	filtered.Preload = validPreloads
 
+	// Clear JoinAliases - this is an internal validation field and should not be persisted
+	filtered.JoinAliases = nil
+
 	return filtered
 }
 
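An in-package (pkg/common) sketch of the effect, using the same TestModel as the existing validator tests; the printed result mirrors TestFilterRequestOptions_ClearsJoinAliases below:

validator := NewColumnValidator(TestModel{})
opts := RequestOptions{
	Columns:     []string{"id", "name"},
	JoinAliases: []string{"d", "r"}, // aliases extracted from custom SQL joins
}
filtered := validator.FilterRequestOptions(opts)
// Sorts and preload filters referencing "d.*" / "r.*" are kept because the alias
// matches, and JoinAliases itself is cleared on the way out (internal-only field).
fmt.Println(filtered.JoinAliases == nil) // true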
@@ -362,6 +362,29 @@ func TestFilterRequestOptions(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterRequestOptions_ClearsJoinAliases(t *testing.T) {
|
||||
model := TestModel{}
|
||||
validator := NewColumnValidator(model)
|
||||
|
||||
options := RequestOptions{
|
||||
Columns: []string{"id", "name"},
|
||||
// Set JoinAliases - this should be cleared by FilterRequestOptions
|
||||
JoinAliases: []string{"d", "u", "r"},
|
||||
}
|
||||
|
||||
filtered := validator.FilterRequestOptions(options)
|
||||
|
||||
// Verify that JoinAliases was cleared (internal field should not persist)
|
||||
if filtered.JoinAliases != nil {
|
||||
t.Errorf("Expected JoinAliases to be nil after filtering, got %v", filtered.JoinAliases)
|
||||
}
|
||||
|
||||
// Verify that other fields are still properly filtered
|
||||
if len(filtered.Columns) != 2 {
|
||||
t.Errorf("Expected 2 columns, got %d", len(filtered.Columns))
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsSafeSortExpression(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -221,7 +221,10 @@ func (cc *ConnectionConfig) ApplyDefaults(global *ManagerConfig) {
 		cc.ConnectTimeout = 10 * time.Second
 	}
 	if cc.QueryTimeout == 0 {
-		cc.QueryTimeout = 30 * time.Second
+		cc.QueryTimeout = 2 * time.Minute // Default to 2 minutes
+	} else if cc.QueryTimeout < 2*time.Minute {
+		// Enforce minimum of 2 minutes
+		cc.QueryTimeout = 2 * time.Minute
 	}
 
 	// Default ORM
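A quick sketch of the resulting clamping behaviour; whether ApplyDefaults tolerates an empty *ManagerConfig is an assumption here, the three outcomes follow directly from the branch above:

global := &ManagerConfig{} // assumed acceptable as an "empty" global config

a := ConnectionConfig{}                               // unset
a.ApplyDefaults(global)                               // -> 2m (default)

b := ConnectionConfig{QueryTimeout: 30 * time.Second} // below the floor
b.ApplyDefaults(global)                               // -> clamped to 2m

c := ConnectionConfig{QueryTimeout: 5 * time.Minute}  // above the floor
c.ApplyDefaults(global)                               // -> kept at 5m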
@@ -325,14 +328,29 @@ func (cc *ConnectionConfig) buildPostgresDSN() string {
 		dsn += fmt.Sprintf(" search_path=%s", cc.Schema)
 	}
 
+	// Add statement_timeout for query execution timeout (in milliseconds)
+	if cc.QueryTimeout > 0 {
+		timeoutMs := int(cc.QueryTimeout.Milliseconds())
+		dsn += fmt.Sprintf(" statement_timeout=%d", timeoutMs)
+	}
+
 	return dsn
 }
 
 func (cc *ConnectionConfig) buildSQLiteDSN() string {
-	if cc.FilePath != "" {
-		return cc.FilePath
-	}
-	return ":memory:"
+	filepath := cc.FilePath
+	if filepath == "" {
+		filepath = ":memory:"
+	}
+
+	// Add query parameters for timeouts
+	// Note: SQLite driver supports _timeout parameter (in milliseconds)
+	if cc.QueryTimeout > 0 {
+		timeoutMs := int(cc.QueryTimeout.Milliseconds())
+		filepath += fmt.Sprintf("?_timeout=%d", timeoutMs)
+	}
+
+	return filepath
 }
 
 func (cc *ConnectionConfig) buildMSSQLDSN() string {
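For illustration, with a two-minute QueryTimeout the DSNs carry the timeout like this (host and database names are made up; only the timeout suffixes come from the Sprintf calls above):

package main

import "fmt"

func main() {
	timeoutMs := 120000
	fmt.Printf("host=localhost dbname=app statement_timeout=%d\n", timeoutMs) // Postgres
	fmt.Printf(":memory:?_timeout=%d\n", timeoutMs)                           // SQLite, when no FilePath is set
}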
@@ -344,6 +362,24 @@ func (cc *ConnectionConfig) buildMSSQLDSN() string {
 		dsn += fmt.Sprintf("&schema=%s", cc.Schema)
 	}
 
+	// Add connection timeout (in seconds)
+	if cc.ConnectTimeout > 0 {
+		timeoutSec := int(cc.ConnectTimeout.Seconds())
+		dsn += fmt.Sprintf("&connection timeout=%d", timeoutSec)
+	}
+
+	// Add dial timeout for TCP connection (in seconds)
+	if cc.ConnectTimeout > 0 {
+		dialTimeoutSec := int(cc.ConnectTimeout.Seconds())
+		dsn += fmt.Sprintf("&dial timeout=%d", dialTimeoutSec)
+	}
+
+	// Add read timeout (in seconds) - enforces timeout for reading data
+	if cc.QueryTimeout > 0 {
+		readTimeoutSec := int(cc.QueryTimeout.Seconds())
+		dsn += fmt.Sprintf("&read timeout=%d", readTimeoutSec)
+	}
+
 	return dsn
 }
 
@@ -76,8 +76,12 @@ func (p *SQLiteProvider) Connect(ctx context.Context, cfg ConnectionConfig) error {
 		// Don't fail connection if WAL mode cannot be enabled
 	}
 
-	// Set busy timeout to handle locked database
-	_, err = db.ExecContext(ctx, "PRAGMA busy_timeout=5000")
+	// Set busy timeout to handle locked database (minimum 2 minutes = 120000ms)
+	busyTimeout := cfg.GetQueryTimeout().Milliseconds()
+	if busyTimeout < 120000 {
+		busyTimeout = 120000 // Enforce minimum of 2 minutes
+	}
+	_, err = db.ExecContext(ctx, fmt.Sprintf("PRAGMA busy_timeout=%d", busyTimeout))
 	if err != nil {
 		if cfg.GetEnableLogging() {
 			logger.Warn("Failed to set busy timeout for SQLite", "error", err)
@@ -318,6 +318,8 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
 	if cursorFilter != "" {
 		logger.Debug("Applying cursor filter: %s", cursorFilter)
 		sanitizedCursor := common.SanitizeWhereClause(cursorFilter, reflection.ExtractTableNameOnly(tableName), &options)
+		// Ensure outer parentheses to prevent OR logic from escaping
+		sanitizedCursor = common.EnsureOuterParentheses(sanitizedCursor)
 		if sanitizedCursor != "" {
 			query = query.Where(sanitizedCursor)
 		}
@@ -1656,6 +1658,8 @@ func (h *Handler) applyPreloads(model interface{}, query common.SelectQuery, pre
 	// Build RequestOptions with all preloads to allow references to sibling relations
 	preloadOpts := &common.RequestOptions{Preload: preloads}
 	sanitizedWhere := common.SanitizeWhereClause(preload.Where, reflection.ExtractTableNameOnly(preload.Relation), preloadOpts)
+	// Ensure outer parentheses to prevent OR logic from escaping
+	sanitizedWhere = common.EnsureOuterParentheses(sanitizedWhere)
 	if len(sanitizedWhere) > 0 {
 		sq = sq.Where(sanitizedWhere)
 	}
 
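A sketch (code fragment) of the cursor-filter path; the keyset cursor expression is hypothetical, the wrapping mirrors what the handler now does:

cursorFilter := "users.created_at < '2026-01-01' OR (users.created_at = '2026-01-01' AND users.id < 500)"
sanitized := common.EnsureOuterParentheses(cursorFilter)
// query.Where(sanitized) appends:
//   AND (users.created_at < '2026-01-01' OR (users.created_at = '2026-01-01' AND users.id < 500))
// so the top-level OR in the cursor cannot bypass the query's other conditions.
_ = sanitized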
@@ -463,7 +463,8 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
 	}
 
 	// Apply filters - validate and adjust for column types first
-	for i := range options.Filters {
+	// Group consecutive OR filters together to prevent OR logic from escaping
+	for i := 0; i < len(options.Filters); {
 		filter := &options.Filters[i]
 
 		// Validate and adjust filter based on column type
@@ -475,8 +476,39 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
 			logicOp = "AND"
 		}
 
-		logger.Debug("Applying filter: %s %s %v (needsCast=%v, logic=%s)", filter.Column, filter.Operator, filter.Value, castInfo.NeedsCast, logicOp)
-		query = h.applyFilter(query, *filter, tableName, castInfo.NeedsCast, logicOp)
+		// Check if this is the start of an OR group
+		if logicOp == "OR" {
+			// Collect all consecutive OR filters
+			orFilters := []*common.FilterOption{filter}
+			orCastInfo := []ColumnCastInfo{castInfo}
+
+			j := i + 1
+			for j < len(options.Filters) {
+				nextFilter := &options.Filters[j]
+				nextLogicOp := nextFilter.LogicOperator
+				if nextLogicOp == "" {
+					nextLogicOp = "AND"
+				}
+				if nextLogicOp == "OR" {
+					nextCastInfo := h.ValidateAndAdjustFilterForColumnType(nextFilter, model)
+					orFilters = append(orFilters, nextFilter)
+					orCastInfo = append(orCastInfo, nextCastInfo)
+					j++
+				} else {
+					break
+				}
+			}
+
+			// Apply the OR group as a single grouped condition
+			logger.Debug("Applying OR filter group with %d conditions", len(orFilters))
+			query = h.applyOrFilterGroup(query, orFilters, orCastInfo, tableName)
+			i = j
+		} else {
+			// Single AND filter - apply normally
+			logger.Debug("Applying filter: %s %s %v (needsCast=%v, logic=%s)", filter.Column, filter.Operator, filter.Value, castInfo.NeedsCast, logicOp)
+			query = h.applyFilter(query, *filter, tableName, castInfo.NeedsCast, logicOp)
+			i++
+		}
 	}
 
 	// Apply custom SQL WHERE clause (AND condition)
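As an illustrative sketch, a filter list like the one below (column names hypothetical) is now applied as one grouped OR predicate plus a separate AND predicate:

filters := []common.FilterOption{
	{Column: "status", Operator: "eq", Value: "active", LogicOperator: "OR"},
	{Column: "status", Operator: "eq", Value: "pending", LogicOperator: "OR"},
	{Column: "age", Operator: "gt", Value: 18}, // empty LogicOperator defaults to AND
}
// The two consecutive OR filters are collected into a single group:
//   WHERE (users.status = ? OR users.status = ?) AND users.age > ?
// instead of being applied one by one, where the ORs could escape and
// bypass the AND condition.
_ = filters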
@@ -486,6 +518,8 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
prefixedWhere := common.AddTablePrefixToColumns(options.CustomSQLWhere, reflection.ExtractTableNameOnly(tableName))
|
||||
// Then sanitize and allow preload table prefixes since custom SQL may reference multiple tables
|
||||
sanitizedWhere := common.SanitizeWhereClause(prefixedWhere, reflection.ExtractTableNameOnly(tableName), &options.RequestOptions)
|
||||
// Ensure outer parentheses to prevent OR logic from escaping
|
||||
sanitizedWhere = common.EnsureOuterParentheses(sanitizedWhere)
|
||||
if sanitizedWhere != "" {
|
||||
query = query.Where(sanitizedWhere)
|
||||
}
|
||||
@@ -497,6 +531,8 @@ func (h *Handler) handleRead(ctx context.Context, w common.ResponseWriter, id st
|
||||
customOr := common.AddTablePrefixToColumns(options.CustomSQLOr, reflection.ExtractTableNameOnly(tableName))
|
||||
// Sanitize and allow preload table prefixes since custom SQL may reference multiple tables
|
||||
sanitizedOr := common.SanitizeWhereClause(customOr, reflection.ExtractTableNameOnly(tableName), &options.RequestOptions)
|
||||
// Ensure outer parentheses to prevent OR logic from escaping
|
||||
sanitizedOr = common.EnsureOuterParentheses(sanitizedOr)
|
||||
if sanitizedOr != "" {
|
||||
query = query.WhereOr(sanitizedOr)
|
||||
}
|
||||
@@ -846,6 +882,15 @@ func (h *Handler) applyPreloadWithRecursion(query common.SelectQuery, preload co
 		}
 	}
 
+	// Apply custom SQL joins from XFiles
+	if len(preload.SqlJoins) > 0 {
+		logger.Debug("Applying %d SQL joins to preload %s", len(preload.SqlJoins), preload.Relation)
+		for _, joinClause := range preload.SqlJoins {
+			sq = sq.Join(joinClause)
+			logger.Debug("Applied SQL join to preload %s: %s", preload.Relation, joinClause)
+		}
+	}
+
 	// Apply filters
 	if len(preload.Filters) > 0 {
 		for _, filter := range preload.Filters {
@@ -893,21 +938,57 @@ func (h *Handler) applyPreloadWithRecursion(query common.SelectQuery, preload co
 	})
 
 	// Handle recursive preloading
-	if preload.Recursive && depth < 4 {
+	if preload.Recursive && depth < 8 {
 		logger.Debug("Applying recursive preload for %s at depth %d", preload.Relation, depth+1)
 
 		// For recursive relationships, we need to get the last part of the relation path
 		// e.g., "MastertaskItems" -> "MastertaskItems.MastertaskItems"
 		relationParts := strings.Split(preload.Relation, ".")
 		lastRelationName := relationParts[len(relationParts)-1]
 
-		// Create a recursive preload with the same configuration
-		// but with the relation path extended
-		recursivePreload := preload
-		recursivePreload.Relation = preload.Relation + "." + lastRelationName
+		// Generate FK-based relation name for children
+		recursiveRelationName := lastRelationName
+		if preload.RelatedKey != "" {
+			// Convert "rid_parentmastertaskitem" to "RID_PARENTMASTERTASKITEM"
+			fkUpper := strings.ToUpper(preload.RelatedKey)
+			recursiveRelationName = lastRelationName + "_" + fkUpper
+			logger.Debug("Generated recursive relation name from RelatedKey: %s (from %s)",
+				recursiveRelationName, preload.RelatedKey)
+		} else {
+			logger.Warn("Recursive preload for %s has no RelatedKey, falling back to %s.%s",
+				preload.Relation, preload.Relation, lastRelationName)
+		}
 
-		// Recursively apply preload until we reach depth 5
+		// Create recursive preload
+		recursivePreload := preload
+		recursivePreload.Relation = preload.Relation + "." + recursiveRelationName
+		recursivePreload.Recursive = false // Prevent infinite recursion at this level
+
+		// CRITICAL: Clear parent's WHERE clause - let Bun use FK traversal
+		recursivePreload.Where = ""
+		recursivePreload.Filters = []common.FilterOption{}
+		logger.Debug("Cleared WHERE clause for recursive preload %s at depth %d",
+			recursivePreload.Relation, depth+1)
+
+		// Apply recursively up to depth 8
 		query = h.applyPreloadWithRecursion(query, recursivePreload, allPreloads, model, depth+1)
+
+		// ALSO: Extend any child relations (like DEF) to recursive levels
+		baseRelation := preload.Relation + "."
+		for i := range allPreloads {
+			relatedPreload := allPreloads[i]
+			if strings.HasPrefix(relatedPreload.Relation, baseRelation) &&
+				!strings.Contains(strings.TrimPrefix(relatedPreload.Relation, baseRelation), ".") {
+				childRelationName := strings.TrimPrefix(relatedPreload.Relation, baseRelation)
+
+				extendedChildPreload := relatedPreload
+				extendedChildPreload.Relation = recursivePreload.Relation + "." + childRelationName
+				extendedChildPreload.Recursive = false
+
+				logger.Debug("Extending related preload '%s' to '%s' at recursive depth %d",
+					relatedPreload.Relation, extendedChildPreload.Relation, depth+1)
+
+				query = h.applyPreloadWithRecursion(query, extendedChildPreload, allPreloads, model, depth+1)
+			}
+		}
 	}
 
 	return query
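A minimal sketch (code fragment) of the FK-based relation name derivation; the relation and key names come from the tests later in this diff:

relation := "MAL"
relatedKey := "rid_parentmastertaskitem"
parts := strings.Split(relation, ".")
last := parts[len(parts)-1]
next := relation + "." + last + "_" + strings.ToUpper(relatedKey)
fmt.Println(next) // MAL.MAL_RID_PARENTMASTERTASKITEM
// A sibling child relation "MAL.DEF" is extended alongside it to
// "MAL.MAL_RID_PARENTMASTERTASKITEM.DEF", with the parent WHERE clause cleared.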
@@ -1996,6 +2077,99 @@ func (h *Handler) applyFilter(query common.SelectQuery, filter common.FilterOpti
	}
}

// applyOrFilterGroup applies a group of OR filters as a single grouped condition
// This ensures OR conditions are properly grouped with parentheses to prevent OR logic from escaping
func (h *Handler) applyOrFilterGroup(query common.SelectQuery, filters []*common.FilterOption, castInfo []ColumnCastInfo, tableName string) common.SelectQuery {
	if len(filters) == 0 {
		return query
	}

	// Build individual filter conditions
	conditions := []string{}
	args := []interface{}{}

	for i, filter := range filters {
		// Qualify the column name with table name if not already qualified
		qualifiedColumn := h.qualifyColumnName(filter.Column, tableName)

		// Apply casting to text if needed for non-numeric columns or non-numeric values
		if castInfo[i].NeedsCast {
			qualifiedColumn = fmt.Sprintf("CAST(%s AS TEXT)", qualifiedColumn)
		}

		// Build the condition based on operator
		condition, filterArgs := h.buildFilterCondition(qualifiedColumn, filter, tableName)
		if condition != "" {
			conditions = append(conditions, condition)
			args = append(args, filterArgs...)
		}
	}

	if len(conditions) == 0 {
		return query
	}

	// Join all conditions with OR and wrap in parentheses
	groupedCondition := "(" + strings.Join(conditions, " OR ") + ")"
	logger.Debug("Applying grouped OR conditions: %s", groupedCondition)

	// Apply as AND condition (the OR is already inside the parentheses)
	return query.Where(groupedCondition, args...)
}
// buildFilterCondition builds a single filter condition and returns the condition string and args
|
||||
func (h *Handler) buildFilterCondition(qualifiedColumn string, filter *common.FilterOption, tableName string) (filterStr string, filterInterface []interface{}) {
|
||||
switch strings.ToLower(filter.Operator) {
|
||||
case "eq", "equals":
|
||||
return fmt.Sprintf("%s = ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "neq", "not_equals", "ne":
|
||||
return fmt.Sprintf("%s != ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "gt", "greater_than":
|
||||
return fmt.Sprintf("%s > ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "gte", "greater_than_equals", "ge":
|
||||
return fmt.Sprintf("%s >= ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "lt", "less_than":
|
||||
return fmt.Sprintf("%s < ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "lte", "less_than_equals", "le":
|
||||
return fmt.Sprintf("%s <= ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "like":
|
||||
return fmt.Sprintf("%s LIKE ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "ilike":
|
||||
return fmt.Sprintf("%s ILIKE ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "in":
|
||||
return fmt.Sprintf("%s IN (?)", qualifiedColumn), []interface{}{filter.Value}
|
||||
case "between":
|
||||
// Handle between operator - exclusive (> val1 AND < val2)
|
||||
if values, ok := filter.Value.([]interface{}); ok && len(values) == 2 {
|
||||
return fmt.Sprintf("(%s > ? AND %s < ?)", qualifiedColumn, qualifiedColumn), []interface{}{values[0], values[1]}
|
||||
} else if values, ok := filter.Value.([]string); ok && len(values) == 2 {
|
||||
return fmt.Sprintf("(%s > ? AND %s < ?)", qualifiedColumn, qualifiedColumn), []interface{}{values[0], values[1]}
|
||||
}
|
||||
logger.Warn("Invalid BETWEEN filter value format")
|
||||
return "", nil
|
||||
case "between_inclusive":
|
||||
// Handle between inclusive operator - inclusive (>= val1 AND <= val2)
|
||||
if values, ok := filter.Value.([]interface{}); ok && len(values) == 2 {
|
||||
return fmt.Sprintf("(%s >= ? AND %s <= ?)", qualifiedColumn, qualifiedColumn), []interface{}{values[0], values[1]}
|
||||
} else if values, ok := filter.Value.([]string); ok && len(values) == 2 {
|
||||
return fmt.Sprintf("(%s >= ? AND %s <= ?)", qualifiedColumn, qualifiedColumn), []interface{}{values[0], values[1]}
|
||||
}
|
||||
logger.Warn("Invalid BETWEEN INCLUSIVE filter value format")
|
||||
return "", nil
|
||||
case "is_null", "isnull":
|
||||
// Check for NULL values - don't use cast for NULL checks
|
||||
colName := h.qualifyColumnName(filter.Column, tableName)
|
||||
return fmt.Sprintf("(%s IS NULL OR %s = '')", colName, colName), nil
|
||||
case "is_not_null", "isnotnull":
|
||||
// Check for NOT NULL values - don't use cast for NULL checks
|
||||
colName := h.qualifyColumnName(filter.Column, tableName)
|
||||
return fmt.Sprintf("(%s IS NOT NULL AND %s != '')", colName, colName), nil
|
||||
default:
|
||||
logger.Warn("Unknown filter operator: %s, defaulting to equals", filter.Operator)
|
||||
return fmt.Sprintf("%s = ?", qualifiedColumn), []interface{}{filter.Value}
|
||||
}
|
||||
}
|
||||
|
||||
// parseTableName splits a table name that may contain schema into separate schema and table
|
||||
func (h *Handler) parseTableName(fullTableName string) (schema, table string) {
|
||||
if idx := strings.LastIndex(fullTableName, "."); idx != -1 {
|
||||
|
||||
@@ -1088,6 +1088,32 @@ func (h *Handler) addXFilesPreload(xfile *XFiles, options *ExtendedRequestOptions
		logger.Debug("X-Files: Set foreign key for %s: %s", relationPath, xfile.ForeignKey)
	}

	// Transfer SqlJoins from XFiles to PreloadOption
	if len(xfile.SqlJoins) > 0 {
		preloadOpt.SqlJoins = make([]string, 0, len(xfile.SqlJoins))
		preloadOpt.JoinAliases = make([]string, 0, len(xfile.SqlJoins))

		for _, joinClause := range xfile.SqlJoins {
			// Sanitize the join clause
			sanitizedJoin := common.SanitizeWhereClause(joinClause, "", nil)
			if sanitizedJoin == "" {
				logger.Warn("X-Files: SqlJoin failed sanitization for %s: %s", relationPath, joinClause)
				continue
			}

			preloadOpt.SqlJoins = append(preloadOpt.SqlJoins, sanitizedJoin)

			// Extract join alias for validation
			alias := extractJoinAlias(sanitizedJoin)
			if alias != "" {
				preloadOpt.JoinAliases = append(preloadOpt.JoinAliases, alias)
				logger.Debug("X-Files: Extracted join alias for %s: %s", relationPath, alias)
			}
		}

		logger.Debug("X-Files: Added %d SQL joins to preload %s", len(preloadOpt.SqlJoins), relationPath)
	}

	// Add the preload option
	options.Preload = append(options.Preload, preloadOpt)

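For reference, the alias extraction behaves like this on the join clauses used in the tests below (extractJoinAlias is unexported, so this sketch sits in package restheadspec; the Example wrapper is illustrative):

func ExampleExtractJoinAlias() {
	fmt.Println(extractJoinAlias("LEFT JOIN departments d ON d.id = users.department_id")) // d
	fmt.Println(extractJoinAlias("INNER JOIN users AS u ON u.id = orders.user_id"))        // u
	fmt.Println(extractJoinAlias("JOIN roles ON roles.id = users.role_id"))                // (empty: no alias)
	fmt.Println(extractJoinAlias("LEFT JOIN departments"))                                 // (empty: no ON clause)
}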
@@ -2,6 +2,8 @@ package restheadspec
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
)
|
||||
|
||||
func TestDecodeHeaderValue(t *testing.T) {
|
||||
@@ -37,6 +39,121 @@ func TestDecodeHeaderValue(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddXFilesPreload_WithSqlJoins(t *testing.T) {
|
||||
handler := &Handler{}
|
||||
options := &ExtendedRequestOptions{
|
||||
RequestOptions: common.RequestOptions{
|
||||
Preload: make([]common.PreloadOption, 0),
|
||||
},
|
||||
}
|
||||
|
||||
// Create an XFiles with SqlJoins
|
||||
xfile := &XFiles{
|
||||
TableName: "users",
|
||||
SqlJoins: []string{
|
||||
"LEFT JOIN departments d ON d.id = users.department_id",
|
||||
"INNER JOIN roles r ON r.id = users.role_id",
|
||||
},
|
||||
FilterFields: []struct {
|
||||
Field string `json:"field"`
|
||||
Value string `json:"value"`
|
||||
Operator string `json:"operator"`
|
||||
}{
|
||||
{Field: "d.active", Value: "true", Operator: "eq"},
|
||||
{Field: "r.name", Value: "admin", Operator: "eq"},
|
||||
},
|
||||
}
|
||||
|
||||
// Add the XFiles preload
|
||||
handler.addXFilesPreload(xfile, options, "")
|
||||
|
||||
// Verify that a preload was added
|
||||
if len(options.Preload) != 1 {
|
||||
t.Fatalf("Expected 1 preload, got %d", len(options.Preload))
|
||||
}
|
||||
|
||||
preload := options.Preload[0]
|
||||
|
||||
// Verify relation name
|
||||
if preload.Relation != "users" {
|
||||
t.Errorf("Expected relation 'users', got '%s'", preload.Relation)
|
||||
}
|
||||
|
||||
// Verify SqlJoins were transferred
|
||||
if len(preload.SqlJoins) != 2 {
|
||||
t.Fatalf("Expected 2 SQL joins, got %d", len(preload.SqlJoins))
|
||||
}
|
||||
|
||||
// Verify JoinAliases were extracted
|
||||
if len(preload.JoinAliases) != 2 {
|
||||
t.Fatalf("Expected 2 join aliases, got %d", len(preload.JoinAliases))
|
||||
}
|
||||
|
||||
// Verify the aliases are correct
|
||||
expectedAliases := []string{"d", "r"}
|
||||
for i, expected := range expectedAliases {
|
||||
if preload.JoinAliases[i] != expected {
|
||||
t.Errorf("Expected alias '%s', got '%s'", expected, preload.JoinAliases[i])
|
||||
}
|
||||
}
|
||||
|
||||
// Verify filters were added
|
||||
if len(preload.Filters) != 2 {
|
||||
t.Fatalf("Expected 2 filters, got %d", len(preload.Filters))
|
||||
}
|
||||
|
||||
// Verify filter columns reference joined tables
|
||||
if preload.Filters[0].Column != "d.active" {
|
||||
t.Errorf("Expected filter column 'd.active', got '%s'", preload.Filters[0].Column)
|
||||
}
|
||||
if preload.Filters[1].Column != "r.name" {
|
||||
t.Errorf("Expected filter column 'r.name', got '%s'", preload.Filters[1].Column)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractJoinAlias(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
joinClause string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "LEFT JOIN with alias",
|
||||
joinClause: "LEFT JOIN departments d ON d.id = users.department_id",
|
||||
expected: "d",
|
||||
},
|
||||
{
|
||||
name: "INNER JOIN with AS keyword",
|
||||
joinClause: "INNER JOIN users AS u ON u.id = orders.user_id",
|
||||
expected: "u",
|
||||
},
|
||||
{
|
||||
name: "JOIN without alias",
|
||||
joinClause: "JOIN roles ON roles.id = users.role_id",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "Complex join with multiple conditions",
|
||||
joinClause: "LEFT OUTER JOIN products p ON p.id = items.product_id AND p.active = true",
|
||||
expected: "p",
|
||||
},
|
||||
{
|
||||
name: "Invalid join (no ON clause)",
|
||||
joinClause: "LEFT JOIN departments",
|
||||
expected: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := extractJoinAlias(tt.joinClause)
|
||||
if result != tt.expected {
|
||||
t.Errorf("Expected alias '%s', got '%s'", tt.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Note: The following functions are unexported (lowercase) and cannot be tested directly:
|
||||
// - parseSelectFields
|
||||
// - parseFieldFilter
|
||||
|
||||
pkg/restheadspec/recursive_preload_test.go (new file, 391 lines)
@@ -0,0 +1,391 @@
|
||||
//go:build !integration
|
||||
// +build !integration
|
||||
|
||||
package restheadspec
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
)
|
||||
|
||||
// TestRecursivePreloadClearsWhereClause tests that recursive preloads
|
||||
// correctly clear the WHERE clause from the parent level to allow
|
||||
// Bun to use foreign key relationships for loading children
|
||||
func TestRecursivePreloadClearsWhereClause(t *testing.T) {
|
||||
// Create a mock handler
|
||||
handler := &Handler{}
|
||||
|
||||
// Create a preload option with a WHERE clause that filters root items
|
||||
// This simulates the xfiles use case where the first level has a filter
|
||||
// like "rid_parentmastertaskitem is null" to get root items
|
||||
preload := common.PreloadOption{
|
||||
Relation: "MastertaskItems",
|
||||
Recursive: true,
|
||||
RelatedKey: "rid_parentmastertaskitem",
|
||||
Where: "rid_parentmastertaskitem is null",
|
||||
Filters: []common.FilterOption{
|
||||
{
|
||||
Column: "rid_parentmastertaskitem",
|
||||
Operator: "is null",
|
||||
Value: nil,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create a mock query that tracks operations
|
||||
mockQuery := &mockSelectQuery{
|
||||
operations: []string{},
|
||||
}
|
||||
|
||||
// Apply the recursive preload at depth 0
|
||||
// This should:
|
||||
// 1. Apply the initial preload with the WHERE clause
|
||||
// 2. Create a recursive preload without the WHERE clause
|
||||
allPreloads := []common.PreloadOption{preload}
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 0)
|
||||
|
||||
// Verify the mock query received the operations
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
// Check that we have at least 2 PreloadRelation calls:
|
||||
// 1. The initial "MastertaskItems" with WHERE clause
|
||||
// 2. The recursive "MastertaskItems.MastertaskItems_RID_PARENTMASTERTASKITEM" without WHERE clause
|
||||
preloadCount := 0
|
||||
recursivePreloadFound := false
|
||||
whereAppliedToRecursive := false
|
||||
|
||||
for _, op := range mock.operations {
|
||||
if op == "PreloadRelation:MastertaskItems" {
|
||||
preloadCount++
|
||||
}
|
||||
if op == "PreloadRelation:MastertaskItems.MastertaskItems_RID_PARENTMASTERTASKITEM" {
|
||||
recursivePreloadFound = true
|
||||
}
|
||||
// Check if WHERE was applied to the recursive preload (it shouldn't be)
|
||||
if op == "Where:rid_parentmastertaskitem is null" && recursivePreloadFound {
|
||||
whereAppliedToRecursive = true
|
||||
}
|
||||
}
|
||||
|
||||
if preloadCount < 1 {
|
||||
t.Errorf("Expected at least 1 PreloadRelation call, got %d", preloadCount)
|
||||
}
|
||||
|
||||
if !recursivePreloadFound {
|
||||
t.Errorf("Expected recursive preload 'MastertaskItems.MastertaskItems_RID_PARENTMASTERTASKITEM' to be created. Operations: %v", mock.operations)
|
||||
}
|
||||
|
||||
if whereAppliedToRecursive {
|
||||
t.Error("WHERE clause should not be applied to recursive preload levels")
|
||||
}
|
||||
}
|
||||
|
||||
// TestRecursivePreloadWithChildRelations tests that child relations
|
||||
// (like DEF in MAL.DEF) are properly extended to recursive levels
|
||||
func TestRecursivePreloadWithChildRelations(t *testing.T) {
|
||||
handler := &Handler{}
|
||||
|
||||
// Create the main recursive preload
|
||||
recursivePreload := common.PreloadOption{
|
||||
Relation: "MAL",
|
||||
Recursive: true,
|
||||
RelatedKey: "rid_parentmastertaskitem",
|
||||
Where: "rid_parentmastertaskitem is null",
|
||||
}
|
||||
|
||||
// Create a child relation that should be extended
|
||||
childPreload := common.PreloadOption{
|
||||
Relation: "MAL.DEF",
|
||||
}
|
||||
|
||||
mockQuery := &mockSelectQuery{
|
||||
operations: []string{},
|
||||
}
|
||||
|
||||
allPreloads := []common.PreloadOption{recursivePreload, childPreload}
|
||||
|
||||
// Apply both preloads - the child preload should be extended when the recursive one processes
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, recursivePreload, allPreloads, nil, 0)
|
||||
|
||||
// Also need to apply the child preload separately (as would happen in normal flow)
|
||||
result = handler.applyPreloadWithRecursion(result, childPreload, allPreloads, nil, 0)
|
||||
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
// Check that the child relation was extended to recursive levels
|
||||
// We should see:
|
||||
// - MAL (with WHERE)
|
||||
// - MAL.DEF
|
||||
// - MAL.MAL_RID_PARENTMASTERTASKITEM (without WHERE)
|
||||
// - MAL.MAL_RID_PARENTMASTERTASKITEM.DEF (extended by recursive logic)
|
||||
foundMALDEF := false
|
||||
foundRecursiveMAL := false
|
||||
foundMALMALDEF := false
|
||||
|
||||
for _, op := range mock.operations {
|
||||
if op == "PreloadRelation:MAL.DEF" {
|
||||
foundMALDEF = true
|
||||
}
|
||||
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
|
||||
foundRecursiveMAL = true
|
||||
}
|
||||
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM.DEF" {
|
||||
foundMALMALDEF = true
|
||||
}
|
||||
}
|
||||
|
||||
if !foundMALDEF {
|
||||
t.Errorf("Expected child preload 'MAL.DEF' to be applied. Operations: %v", mock.operations)
|
||||
}
|
||||
|
||||
if !foundRecursiveMAL {
|
||||
t.Errorf("Expected recursive preload 'MAL.MAL_RID_PARENTMASTERTASKITEM' to be created. Operations: %v", mock.operations)
|
||||
}
|
||||
|
||||
if !foundMALMALDEF {
|
||||
t.Errorf("Expected child preload to be extended to 'MAL.MAL_RID_PARENTMASTERTASKITEM.DEF' at recursive level. Operations: %v", mock.operations)
|
||||
}
|
||||
}
|
||||
|
||||
// TestRecursivePreloadGeneratesCorrectRelationName tests that the recursive
|
||||
// preload generates the correct FK-based relation name using RelatedKey
|
||||
func TestRecursivePreloadGeneratesCorrectRelationName(t *testing.T) {
|
||||
handler := &Handler{}
|
||||
|
||||
// Test case 1: With RelatedKey - should generate FK-based name
|
||||
t.Run("WithRelatedKey", func(t *testing.T) {
|
||||
preload := common.PreloadOption{
|
||||
Relation: "MAL",
|
||||
Recursive: true,
|
||||
RelatedKey: "rid_parentmastertaskitem",
|
||||
}
|
||||
|
||||
mockQuery := &mockSelectQuery{operations: []string{}}
|
||||
allPreloads := []common.PreloadOption{preload}
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 0)
|
||||
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
// Should generate MAL.MAL_RID_PARENTMASTERTASKITEM
|
||||
foundCorrectRelation := false
|
||||
foundIncorrectRelation := false
|
||||
|
||||
for _, op := range mock.operations {
|
||||
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
|
||||
foundCorrectRelation = true
|
||||
}
|
||||
if op == "PreloadRelation:MAL.MAL" {
|
||||
foundIncorrectRelation = true
|
||||
}
|
||||
}
|
||||
|
||||
if !foundCorrectRelation {
|
||||
t.Errorf("Expected 'MAL.MAL_RID_PARENTMASTERTASKITEM' relation, operations: %v", mock.operations)
|
||||
}
|
||||
|
||||
if foundIncorrectRelation {
|
||||
t.Error("Should NOT generate 'MAL.MAL' relation when RelatedKey is specified")
|
||||
}
|
||||
})
|
||||
|
||||
// Test case 2: Without RelatedKey - should fallback to old behavior
|
||||
t.Run("WithoutRelatedKey", func(t *testing.T) {
|
||||
preload := common.PreloadOption{
|
||||
Relation: "MAL",
|
||||
Recursive: true,
|
||||
// No RelatedKey
|
||||
}
|
||||
|
||||
mockQuery := &mockSelectQuery{operations: []string{}}
|
||||
allPreloads := []common.PreloadOption{preload}
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 0)
|
||||
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
// Should fallback to MAL.MAL
|
||||
foundFallback := false
|
||||
for _, op := range mock.operations {
|
||||
if op == "PreloadRelation:MAL.MAL" {
|
||||
foundFallback = true
|
||||
}
|
||||
}
|
||||
|
||||
if !foundFallback {
|
||||
t.Errorf("Expected fallback 'MAL.MAL' relation when no RelatedKey, operations: %v", mock.operations)
|
||||
}
|
||||
})
|
||||
|
||||
// Test case 3: Depth limit of 8
|
||||
t.Run("DepthLimit", func(t *testing.T) {
|
||||
preload := common.PreloadOption{
|
||||
Relation: "MAL",
|
||||
Recursive: true,
|
||||
RelatedKey: "rid_parentmastertaskitem",
|
||||
}
|
||||
|
||||
mockQuery := &mockSelectQuery{operations: []string{}}
|
||||
allPreloads := []common.PreloadOption{preload}
|
||||
|
||||
// Start at depth 7 - should create one more level
|
||||
result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 7)
|
||||
mock := result.(*mockSelectQuery)
|
||||
|
||||
foundDepth8 := false
|
||||
for _, op := range mock.operations {
|
||||
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
|
||||
foundDepth8 = true
|
||||
}
|
||||
}
|
||||
|
||||
if !foundDepth8 {
|
||||
t.Error("Expected to create recursive level at depth 8")
|
||||
}
|
||||
|
||||
// Start at depth 8 - should NOT create another level
|
||||
mockQuery2 := &mockSelectQuery{operations: []string{}}
|
||||
result2 := handler.applyPreloadWithRecursion(mockQuery2, preload, allPreloads, nil, 8)
|
||||
mock2 := result2.(*mockSelectQuery)
|
||||
|
||||
foundDepth9 := false
|
||||
for _, op := range mock2.operations {
|
||||
if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
|
||||
foundDepth9 = true
|
||||
}
|
||||
}
|
||||
|
||||
if foundDepth9 {
|
||||
t.Error("Should NOT create recursive level beyond depth 8")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// mockSelectQuery implements common.SelectQuery for testing
|
||||
type mockSelectQuery struct {
|
||||
operations []string
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Model(model interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Model")
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Table(table string) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Table:"+table)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Column(columns ...string) common.SelectQuery {
|
||||
for _, col := range columns {
|
||||
m.operations = append(m.operations, "Column:"+col)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) ColumnExpr(query string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "ColumnExpr:"+query)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Where(query string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Where:"+query)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) WhereOr(query string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "WhereOr:"+query)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) WhereIn(column string, values interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "WhereIn:"+column)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Order(order string) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Order:"+order)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) OrderExpr(order string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "OrderExpr:"+order)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Limit(limit int) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Limit")
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Offset(offset int) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Offset")
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Join(join string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Join:"+join)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) LeftJoin(join string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "LeftJoin:"+join)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Group(columns string) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Group")
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Having(query string, args ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Having:"+query)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Preload(relation string, conditions ...interface{}) common.SelectQuery {
|
||||
m.operations = append(m.operations, "Preload:"+relation)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) PreloadRelation(relation string, apply ...func(common.SelectQuery) common.SelectQuery) common.SelectQuery {
|
||||
m.operations = append(m.operations, "PreloadRelation:"+relation)
|
||||
// Apply the preload modifiers
|
||||
for _, fn := range apply {
|
||||
fn(m)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) JoinRelation(relation string, apply ...func(common.SelectQuery) common.SelectQuery) common.SelectQuery {
|
||||
m.operations = append(m.operations, "JoinRelation:"+relation)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Scan(ctx context.Context, dest interface{}) error {
|
||||
m.operations = append(m.operations, "Scan")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) ScanModel(ctx context.Context) error {
|
||||
m.operations = append(m.operations, "ScanModel")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Count(ctx context.Context) (int, error) {
|
||||
m.operations = append(m.operations, "Count")
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) Exists(ctx context.Context) (bool, error) {
|
||||
m.operations = append(m.operations, "Exists")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) GetUnderlyingQuery() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockSelectQuery) GetModel() interface{} {
|
||||
return nil
|
||||
}
|
||||
pkg/restheadspec/xfiles_integration_test.go (new file, 525 lines)
@@ -0,0 +1,525 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package restheadspec
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/bitechdev/ResolveSpec/pkg/common"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// mockSelectQuery implements common.SelectQuery for testing (integration version)
type mockSelectQuery struct {
	operations []string
}

func (m *mockSelectQuery) Model(model interface{}) common.SelectQuery {
	m.operations = append(m.operations, "Model")
	return m
}

func (m *mockSelectQuery) Table(table string) common.SelectQuery {
	m.operations = append(m.operations, "Table:"+table)
	return m
}

func (m *mockSelectQuery) Column(columns ...string) common.SelectQuery {
	for _, col := range columns {
		m.operations = append(m.operations, "Column:"+col)
	}
	return m
}

func (m *mockSelectQuery) ColumnExpr(query string, args ...interface{}) common.SelectQuery {
	m.operations = append(m.operations, "ColumnExpr:"+query)
	return m
}

func (m *mockSelectQuery) Where(query string, args ...interface{}) common.SelectQuery {
	m.operations = append(m.operations, "Where:"+query)
	return m
}

func (m *mockSelectQuery) WhereOr(query string, args ...interface{}) common.SelectQuery {
	m.operations = append(m.operations, "WhereOr:"+query)
	return m
}

func (m *mockSelectQuery) WhereIn(column string, values interface{}) common.SelectQuery {
	m.operations = append(m.operations, "WhereIn:"+column)
	return m
}

func (m *mockSelectQuery) Order(order string) common.SelectQuery {
	m.operations = append(m.operations, "Order:"+order)
	return m
}

func (m *mockSelectQuery) OrderExpr(order string, args ...interface{}) common.SelectQuery {
	m.operations = append(m.operations, "OrderExpr:"+order)
	return m
}

func (m *mockSelectQuery) Limit(limit int) common.SelectQuery {
	m.operations = append(m.operations, "Limit")
	return m
}

func (m *mockSelectQuery) Offset(offset int) common.SelectQuery {
	m.operations = append(m.operations, "Offset")
	return m
}

func (m *mockSelectQuery) Join(join string, args ...interface{}) common.SelectQuery {
	m.operations = append(m.operations, "Join:"+join)
	return m
}

func (m *mockSelectQuery) LeftJoin(join string, args ...interface{}) common.SelectQuery {
	m.operations = append(m.operations, "LeftJoin:"+join)
	return m
}

func (m *mockSelectQuery) Group(columns string) common.SelectQuery {
	m.operations = append(m.operations, "Group")
	return m
}

func (m *mockSelectQuery) Having(query string, args ...interface{}) common.SelectQuery {
	m.operations = append(m.operations, "Having:"+query)
	return m
}

func (m *mockSelectQuery) Preload(relation string, conditions ...interface{}) common.SelectQuery {
	m.operations = append(m.operations, "Preload:"+relation)
	return m
}

func (m *mockSelectQuery) PreloadRelation(relation string, apply ...func(common.SelectQuery) common.SelectQuery) common.SelectQuery {
	m.operations = append(m.operations, "PreloadRelation:"+relation)
	// Apply the preload modifiers
	for _, fn := range apply {
		fn(m)
	}
	return m
}

func (m *mockSelectQuery) JoinRelation(relation string, apply ...func(common.SelectQuery) common.SelectQuery) common.SelectQuery {
	m.operations = append(m.operations, "JoinRelation:"+relation)
	return m
}

func (m *mockSelectQuery) Scan(ctx context.Context, dest interface{}) error {
	m.operations = append(m.operations, "Scan")
	return nil
}

func (m *mockSelectQuery) ScanModel(ctx context.Context) error {
	m.operations = append(m.operations, "ScanModel")
	return nil
}

func (m *mockSelectQuery) Count(ctx context.Context) (int, error) {
	m.operations = append(m.operations, "Count")
	return 0, nil
}

func (m *mockSelectQuery) Exists(ctx context.Context) (bool, error) {
	m.operations = append(m.operations, "Exists")
	return false, nil
}

func (m *mockSelectQuery) GetUnderlyingQuery() interface{} {
	return nil
}

func (m *mockSelectQuery) GetModel() interface{} {
	return nil
}

// TestXFilesRecursivePreload is an integration test that validates the XFiles
// recursive preload functionality using real test data files.
//
// This test ensures:
//  1. XFiles request JSON is correctly parsed into PreloadOptions
//  2. Recursive preload generates correct FK-based relation names (MAL_RID_PARENTMASTERTASKITEM)
//  3. Parent WHERE clauses don't leak to child levels
//  4. Child relations (like DEF) are extended to all recursive levels
//  5. Hierarchical data structure matches expected output
func TestXFilesRecursivePreload(t *testing.T) {
	// Load the XFiles request configuration
	requestPath := filepath.Join("..", "..", "tests", "data", "xfiles.request.json")
	requestData, err := os.ReadFile(requestPath)
	require.NoError(t, err, "Failed to read xfiles.request.json")

	var xfileConfig XFiles
	err = json.Unmarshal(requestData, &xfileConfig)
	require.NoError(t, err, "Failed to parse xfiles.request.json")

	// Create handler and parse XFiles into PreloadOptions
	handler := &Handler{}
	options := &ExtendedRequestOptions{
		RequestOptions: common.RequestOptions{
			Preload: []common.PreloadOption{},
		},
	}

	// Process the XFiles configuration - start with the root table
	handler.processXFilesRelations(&xfileConfig, options, "")

	// Verify that preload options were created
	require.NotEmpty(t, options.Preload, "Expected preload options to be created")

	// Test 1: Verify recursive preload option has RelatedKey set
	t.Run("RecursivePreloadHasRelatedKey", func(t *testing.T) {
		// Find the recursive mastertaskitem preload
		var recursivePreload *common.PreloadOption
		for i := range options.Preload {
			preload := &options.Preload[i]
			if preload.Relation == "mastertask.mastertaskitem.mastertaskitem" && preload.Recursive {
				recursivePreload = preload
				break
			}
		}

		require.NotNil(t, recursivePreload, "Expected to find recursive mastertaskitem preload")
		assert.Equal(t, "rid_parentmastertaskitem", recursivePreload.RelatedKey,
			"Recursive preload should have RelatedKey set from xfiles config")
		assert.True(t, recursivePreload.Recursive, "mastertaskitem preload should be marked as recursive")
	})

	// Test 2: Verify root level mastertaskitem has WHERE clause for filtering root items
	t.Run("RootLevelHasWhereClause", func(t *testing.T) {
		var rootPreload *common.PreloadOption
		for i := range options.Preload {
			preload := &options.Preload[i]
			if preload.Relation == "mastertask.mastertaskitem" && !preload.Recursive {
				rootPreload = preload
				break
			}
		}

		require.NotNil(t, rootPreload, "Expected to find root mastertaskitem preload")
		assert.NotEmpty(t, rootPreload.Where, "Root mastertaskitem should have WHERE clause")
		// The WHERE clause should filter for root items (rid_parentmastertaskitem is null)
	})

	// Test 3: Verify actiondefinition relation exists for mastertaskitem
	t.Run("DEFRelationExists", func(t *testing.T) {
		var defPreload *common.PreloadOption
		for i := range options.Preload {
			preload := &options.Preload[i]
			if preload.Relation == "mastertask.mastertaskitem.actiondefinition" {
				defPreload = preload
				break
			}
		}

		require.NotNil(t, defPreload, "Expected to find actiondefinition preload for mastertaskitem")
		assert.Equal(t, "rid_actiondefinition", defPreload.ForeignKey,
			"actiondefinition preload should have ForeignKey set")
	})

	// Test 4: Verify relation name generation with mock query
	t.Run("RelationNameGeneration", func(t *testing.T) {
		// Find the recursive mastertaskitem preload
		var recursivePreload common.PreloadOption
		found := false
		for _, preload := range options.Preload {
			if preload.Relation == "mastertask.mastertaskitem.mastertaskitem" && preload.Recursive {
				recursivePreload = preload
				found = true
				break
			}
		}

		require.True(t, found, "Expected to find recursive mastertaskitem preload")

		// Create mock query to track operations
		mockQuery := &mockSelectQuery{operations: []string{}}

		// Apply the recursive preload
		result := handler.applyPreloadWithRecursion(mockQuery, recursivePreload, options.Preload, nil, 0)
		mock := result.(*mockSelectQuery)

		// Verify the correct FK-based relation name was generated
		foundCorrectRelation := false
		foundIncorrectRelation := false

		for _, op := range mock.operations {
			// Should generate: mastertask.mastertaskitem.mastertaskitem.mastertaskitem_RID_PARENTMASTERTASKITEM
			if op == "PreloadRelation:mastertask.mastertaskitem.mastertaskitem.mastertaskitem_RID_PARENTMASTERTASKITEM" {
				foundCorrectRelation = true
			}
			// Should NOT generate: mastertask.mastertaskitem.mastertaskitem.mastertaskitem
			if op == "PreloadRelation:mastertask.mastertaskitem.mastertaskitem.mastertaskitem" {
				foundIncorrectRelation = true
			}
		}

		assert.True(t, foundCorrectRelation,
			"Expected FK-based relation name 'mastertask.mastertaskitem.mastertaskitem.mastertaskitem_RID_PARENTMASTERTASKITEM' to be generated. Operations: %v",
			mock.operations)
		assert.False(t, foundIncorrectRelation,
			"Should NOT generate simple relation name when RelatedKey is set")
	})

	// Test 5: Verify WHERE clause is cleared for recursive levels
	t.Run("WhereClauseClearedForChildren", func(t *testing.T) {
		// Find the recursive mastertaskitem preload with WHERE clause
		var recursivePreload common.PreloadOption
		found := false
		for _, preload := range options.Preload {
			if preload.Relation == "mastertask.mastertaskitem.mastertaskitem" && preload.Recursive {
				recursivePreload = preload
				found = true
				break
			}
		}

		require.True(t, found, "Expected to find recursive mastertaskitem preload")

		// The root level might have a WHERE clause,
		// but when we apply recursion, it should be cleared.

		mockQuery := &mockSelectQuery{operations: []string{}}
		result := handler.applyPreloadWithRecursion(mockQuery, recursivePreload, options.Preload, nil, 0)
		mock := result.(*mockSelectQuery)

		// After the first level, WHERE clauses should not be reapplied.
		// We check that the recursive relation was created (which means WHERE was cleared internally).
		foundRecursiveRelation := false
		for _, op := range mock.operations {
			if op == "PreloadRelation:mastertask.mastertaskitem.mastertaskitem.mastertaskitem_RID_PARENTMASTERTASKITEM" {
				foundRecursiveRelation = true
			}
		}

		assert.True(t, foundRecursiveRelation,
			"Recursive relation should be created (WHERE clause should be cleared internally)")
	})

	// Test 6: Verify child relations are extended to recursive levels
	t.Run("ChildRelationsExtended", func(t *testing.T) {
		// Find both the recursive mastertaskitem and the actiondefinition preloads
		var recursivePreload common.PreloadOption
		foundRecursive := false

		for _, preload := range options.Preload {
			if preload.Relation == "mastertask.mastertaskitem.mastertaskitem" && preload.Recursive {
				recursivePreload = preload
				foundRecursive = true
				break
			}
		}

		require.True(t, foundRecursive, "Expected to find recursive mastertaskitem preload")

		mockQuery := &mockSelectQuery{operations: []string{}}
		result := handler.applyPreloadWithRecursion(mockQuery, recursivePreload, options.Preload, nil, 0)
		mock := result.(*mockSelectQuery)

		// actiondefinition should be extended to the recursive level.
		// Expected: mastertask.mastertaskitem.mastertaskitem.mastertaskitem_RID_PARENTMASTERTASKITEM.actiondefinition
		foundExtendedDEF := false
		for _, op := range mock.operations {
			if op == "PreloadRelation:mastertask.mastertaskitem.mastertaskitem.mastertaskitem_RID_PARENTMASTERTASKITEM.actiondefinition" {
				foundExtendedDEF = true
			}
		}

		assert.True(t, foundExtendedDEF,
			"Expected actiondefinition relation to be extended to recursive level. Operations: %v",
			mock.operations)
	})
}

// TestXFilesRecursivePreloadDepth tests that recursive preloads respect the depth limit of 8
func TestXFilesRecursivePreloadDepth(t *testing.T) {
	handler := &Handler{}

	preload := common.PreloadOption{
		Relation:   "MAL",
		Recursive:  true,
		RelatedKey: "rid_parentmastertaskitem",
	}

	allPreloads := []common.PreloadOption{preload}

	t.Run("Depth7CreatesLevel8", func(t *testing.T) {
		mockQuery := &mockSelectQuery{operations: []string{}}
		result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 7)
		mock := result.(*mockSelectQuery)

		foundDepth8 := false
		for _, op := range mock.operations {
			if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
				foundDepth8 = true
			}
		}

		assert.True(t, foundDepth8, "Should create level 8 when starting at depth 7")
	})

	t.Run("Depth8DoesNotCreateLevel9", func(t *testing.T) {
		mockQuery := &mockSelectQuery{operations: []string{}}
		result := handler.applyPreloadWithRecursion(mockQuery, preload, allPreloads, nil, 8)
		mock := result.(*mockSelectQuery)

		foundDepth9 := false
		for _, op := range mock.operations {
			if op == "PreloadRelation:MAL.MAL_RID_PARENTMASTERTASKITEM" {
				foundDepth9 = true
			}
		}

		assert.False(t, foundDepth9, "Should NOT create level 9 (depth limit is 8)")
	})
}

// TestXFilesResponseStructure validates the actual structure of the response.
// This test can be expanded when we have a full database integration test environment.
func TestXFilesResponseStructure(t *testing.T) {
	// Load the expected correct response
	correctResponsePath := filepath.Join("..", "..", "tests", "data", "xfiles.response.correct.json")
	correctData, err := os.ReadFile(correctResponsePath)
	require.NoError(t, err, "Failed to read xfiles.response.correct.json")

	var correctResponse []map[string]interface{}
	err = json.Unmarshal(correctData, &correctResponse)
	require.NoError(t, err, "Failed to parse xfiles.response.correct.json")

	// Test 1: Verify root level has exactly 1 masterprocess
	t.Run("RootLevelHasOneItem", func(t *testing.T) {
		assert.Len(t, correctResponse, 1, "Root level should have exactly 1 masterprocess record")
	})

	// Test 2: Verify the root item has MTL relation
	t.Run("RootHasMTLRelation", func(t *testing.T) {
		require.NotEmpty(t, correctResponse, "Response should not be empty")
		rootItem := correctResponse[0]

		mtl, exists := rootItem["MTL"]
		assert.True(t, exists, "Root item should have MTL relation")
		assert.NotNil(t, mtl, "MTL relation should not be null")
	})

	// Test 3: Verify MTL has MAL items
	t.Run("MTLHasMALItems", func(t *testing.T) {
		require.NotEmpty(t, correctResponse, "Response should not be empty")
		rootItem := correctResponse[0]

		mtl, ok := rootItem["MTL"].([]interface{})
		require.True(t, ok, "MTL should be an array")
		require.NotEmpty(t, mtl, "MTL should have items")

		firstMTL, ok := mtl[0].(map[string]interface{})
		require.True(t, ok, "MTL item should be a map")

		mal, exists := firstMTL["MAL"]
		assert.True(t, exists, "MTL item should have MAL relation")
		assert.NotNil(t, mal, "MAL relation should not be null")
	})

	// Test 4: Verify MAL items have MAL_RID_PARENTMASTERTASKITEM relation (recursive)
	t.Run("MALHasRecursiveRelation", func(t *testing.T) {
		require.NotEmpty(t, correctResponse, "Response should not be empty")
		rootItem := correctResponse[0]

		mtl, ok := rootItem["MTL"].([]interface{})
		require.True(t, ok, "MTL should be an array")
		require.NotEmpty(t, mtl, "MTL should have items")

		firstMTL, ok := mtl[0].(map[string]interface{})
		require.True(t, ok, "MTL item should be a map")

		mal, ok := firstMTL["MAL"].([]interface{})
		require.True(t, ok, "MAL should be an array")
		require.NotEmpty(t, mal, "MAL should have items")

		firstMAL, ok := mal[0].(map[string]interface{})
		require.True(t, ok, "MAL item should be a map")

		// The key assertion: check for FK-based relation name
		recursiveRelation, exists := firstMAL["MAL_RID_PARENTMASTERTASKITEM"]
		assert.True(t, exists,
			"MAL item should have MAL_RID_PARENTMASTERTASKITEM relation (FK-based name)")

		// It can be null or an array, depending on whether this item has children
		if recursiveRelation != nil {
			_, isArray := recursiveRelation.([]interface{})
			assert.True(t, isArray,
				"MAL_RID_PARENTMASTERTASKITEM should be an array when not null")
		}
	})

	// Test 5: Verify "Receive COB Document for" appears as a child, not at root
	t.Run("ChildItemsAreNested", func(t *testing.T) {
		// This test verifies that "Receive COB Document for" doesn't appear
		// multiple times at the wrong level, but is properly nested.

		// Count how many times we find this description at the MAL level (should be 0 or 1)
		require.NotEmpty(t, correctResponse, "Response should not be empty")
		rootItem := correctResponse[0]

		mtl, ok := rootItem["MTL"].([]interface{})
		require.True(t, ok, "MTL should be an array")
		require.NotEmpty(t, mtl, "MTL should have items")

		firstMTL, ok := mtl[0].(map[string]interface{})
		require.True(t, ok, "MTL item should be a map")

		mal, ok := firstMTL["MAL"].([]interface{})
		require.True(t, ok, "MAL should be an array")

		// Count root-level MAL items (before the fix, there were 12; should be 1)
		assert.Len(t, mal, 1,
			"MAL should have exactly 1 root-level item (before fix: 12 duplicates)")

		// Verify the root item has a description
		firstMAL, ok := mal[0].(map[string]interface{})
		require.True(t, ok, "MAL item should be a map")

		description, exists := firstMAL["description"]
		assert.True(t, exists, "MAL item should have a description")
		assert.Equal(t, "Capture COB Information", description,
			"Root MAL item should be 'Capture COB Information'")
	})

	// Test 6: Verify DEF relation exists at MAL level
	t.Run("DEFRelationExists", func(t *testing.T) {
		require.NotEmpty(t, correctResponse, "Response should not be empty")
		rootItem := correctResponse[0]

		mtl, ok := rootItem["MTL"].([]interface{})
		require.True(t, ok, "MTL should be an array")
		require.NotEmpty(t, mtl, "MTL should have items")

		firstMTL, ok := mtl[0].(map[string]interface{})
		require.True(t, ok, "MTL item should be a map")

		mal, ok := firstMTL["MAL"].([]interface{})
		require.True(t, ok, "MAL should be an array")
		require.NotEmpty(t, mal, "MAL should have items")

		firstMAL, ok := mal[0].(map[string]interface{})
		require.True(t, ok, "MAL item should be a map")

		// Verify DEF relation exists (child relation extension)
		def, exists := firstMAL["DEF"]
		assert.True(t, exists, "MAL item should have DEF relation")

		// DEF can be null or an object
		if def != nil {
			_, isMap := def.(map[string]interface{})
			assert.True(t, isMap, "DEF should be an object when not null")
		}
	})
}
@@ -411,7 +411,9 @@ func newInstance(cfg Config) (*serverInstance, error) {
		return nil, fmt.Errorf("handler cannot be nil")
	}

	// Set default timeouts
	// Set default timeouts with minimum of 10 minutes for connection timeouts
	minConnectionTimeout := 10 * time.Minute

	if cfg.ShutdownTimeout == 0 {
		cfg.ShutdownTimeout = 30 * time.Second
	}
@@ -419,13 +421,22 @@ func newInstance(cfg Config) (*serverInstance, error) {
		cfg.DrainTimeout = 25 * time.Second
	}
	if cfg.ReadTimeout == 0 {
		cfg.ReadTimeout = 15 * time.Second
		cfg.ReadTimeout = minConnectionTimeout
	} else if cfg.ReadTimeout < minConnectionTimeout {
		// Enforce minimum of 10 minutes
		cfg.ReadTimeout = minConnectionTimeout
	}
	if cfg.WriteTimeout == 0 {
		cfg.WriteTimeout = 15 * time.Second
		cfg.WriteTimeout = minConnectionTimeout
	} else if cfg.WriteTimeout < minConnectionTimeout {
		// Enforce minimum of 10 minutes
		cfg.WriteTimeout = minConnectionTimeout
	}
	if cfg.IdleTimeout == 0 {
		cfg.IdleTimeout = 60 * time.Second
		cfg.IdleTimeout = minConnectionTimeout
	} else if cfg.IdleTimeout < minConnectionTimeout {
		// Enforce minimum of 10 minutes
		cfg.IdleTimeout = minConnectionTimeout
	}

	addr := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port)

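Note: the three connection-timeout checks above repeat the same clamp-to-minimum pattern. A minimal sketch of an equivalent helper (hypothetical, not part of this commit; the name clampMin is made up for illustration):

func clampMin(d, floor time.Duration) time.Duration {
	// Zero (unset) and values below the floor are both raised to the minimum.
	if d < floor {
		return floor
	}
	return d
}

// cfg.ReadTimeout = clampMin(cfg.ReadTimeout, minConnectionTimeout)
// cfg.WriteTimeout = clampMin(cfg.WriteTimeout, minConnectionTimeout)
// cfg.IdleTimeout = clampMin(cfg.IdleTimeout, minConnectionTimeout)
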
@@ -4,6 +4,7 @@ package spectypes
import (
	"database/sql"
	"database/sql/driver"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"reflect"
@@ -60,7 +61,33 @@ func (n *SqlNull[T]) Scan(value any) error {
		return nil
	}

	// Try standard sql.Null[T] first.
	// Check if T is []byte, and decode base64 if applicable.
	// Do this BEFORE trying sql.Null to ensure base64 is handled.
	var zero T
	if _, ok := any(zero).([]byte); ok {
		// For []byte types, try to decode from base64
		var strVal string
		switch v := value.(type) {
		case string:
			strVal = v
		case []byte:
			strVal = string(v)
		default:
			strVal = fmt.Sprintf("%v", value)
		}
		// Try base64 decode
		if decoded, err := base64.StdEncoding.DecodeString(strVal); err == nil {
			n.Val = any(decoded).(T)
			n.Valid = true
			return nil
		}
		// Fallback to raw bytes
		n.Val = any([]byte(strVal)).(T)
		n.Valid = true
		return nil
	}

	// Try standard sql.Null[T] for other types.
	var sqlNull sql.Null[T]
	if err := sqlNull.Scan(value); err == nil {
		n.Val = sqlNull.V
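With this change, a []byte-typed SqlNull accepts base64 text from the database driver and falls back to raw bytes when decoding fails. A brief illustration (using the SqlByteArray alias introduced later in this commit; a sketch of the behaviour described above, not an authoritative reference):

var b SqlByteArray
_ = b.Scan("aGVsbG8=")    // valid base64: b.Val == []byte("hello"), b.Valid == true
_ = b.Scan("not base64!") // invalid base64: falls back to raw bytes, b.Val == []byte("not base64!")
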
@@ -122,6 +149,9 @@ func (n *SqlNull[T]) FromString(s string) error {
			n.Val = any(u).(T)
			n.Valid = true
		}
	case []byte:
		n.Val = any([]byte(s)).(T)
		n.Valid = true
	case string:
		n.Val = any(s).(T)
		n.Valid = true
@@ -149,6 +179,14 @@ func (n SqlNull[T]) MarshalJSON() ([]byte, error) {
	if !n.Valid {
		return []byte("null"), nil
	}

	// Check if T is []byte, and encode to base64
	if _, ok := any(n.Val).([]byte); ok {
		// Encode []byte as base64
		encoded := base64.StdEncoding.EncodeToString(any(n.Val).([]byte))
		return json.Marshal(encoded)
	}

	return json.Marshal(n.Val)
}

@@ -160,8 +198,25 @@ func (n *SqlNull[T]) UnmarshalJSON(b []byte) error {
		return nil
	}

	// Try direct unmarshal.
	// Check if T is []byte, and decode from base64
	var val T
	if _, ok := any(val).([]byte); ok {
		// Unmarshal as string first (JSON representation)
		var s string
		if err := json.Unmarshal(b, &s); err == nil {
			// Decode from base64
			if decoded, err := base64.StdEncoding.DecodeString(s); err == nil {
				n.Val = any(decoded).(T)
				n.Valid = true
				return nil
			}
			// Fallback to raw string as bytes
			n.Val = any([]byte(s)).(T)
			n.Valid = true
			return nil
		}
	}

	if err := json.Unmarshal(b, &val); err == nil {
		n.Val = val
		n.Valid = true
@@ -271,13 +326,14 @@ func (n SqlNull[T]) UUID() uuid.UUID {

// Type aliases for common types.
type (
	SqlInt16   = SqlNull[int16]
	SqlInt32   = SqlNull[int32]
	SqlInt64   = SqlNull[int64]
	SqlFloat64 = SqlNull[float64]
	SqlBool    = SqlNull[bool]
	SqlString  = SqlNull[string]
	SqlUUID    = SqlNull[uuid.UUID]
	SqlInt16     = SqlNull[int16]
	SqlInt32     = SqlNull[int32]
	SqlInt64     = SqlNull[int64]
	SqlFloat64   = SqlNull[float64]
	SqlBool      = SqlNull[bool]
	SqlString    = SqlNull[string]
	SqlByteArray = SqlNull[[]byte]
	SqlUUID      = SqlNull[uuid.UUID]
)

// SqlTimeStamp - Timestamp with custom formatting (YYYY-MM-DDTHH:MM:SS).
@@ -581,6 +637,10 @@ func NewSqlString(v string) SqlString {
	return SqlString{Val: v, Valid: true}
}

func NewSqlByteArray(v []byte) SqlByteArray {
	return SqlByteArray{Val: v, Valid: true}
}

func NewSqlUUID(v uuid.UUID) SqlUUID {
	return SqlUUID{Val: v, Valid: true}
}

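A short usage sketch tying the new pieces together (the SqlByteArray alias, the NewSqlByteArray constructor, and the base64 MarshalJSON/UnmarshalJSON paths above); the commented output is what the MarshalJSON hunk above would produce, shown here only as an illustration:

b := NewSqlByteArray([]byte{0xDE, 0xAD, 0xBE, 0xEF})
out, _ := json.Marshal(b)
fmt.Println(string(out)) // "3q2+7w==" - []byte values are serialized as base64 strings

var back SqlByteArray
_ = json.Unmarshal(out, &back) // decodes the base64 string back into the original bytes
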
@@ -565,3 +565,394 @@ func TestTryIfInt64(t *testing.T) {
		})
	}
}

// TestSqlString tests SqlString without base64 (plain text)
func TestSqlString_Scan(t *testing.T) {
	tests := []struct {
		name     string
		input    interface{}
		expected string
		valid    bool
	}{
		{
			name:     "plain string",
			input:    "hello world",
			expected: "hello world",
			valid:    true,
		},
		{
			name:     "plain text",
			input:    "plain text",
			expected: "plain text",
			valid:    true,
		},
		{
			name:     "bytes as string",
			input:    []byte("raw bytes"),
			expected: "raw bytes",
			valid:    true,
		},
		{
			name:     "nil value",
			input:    nil,
			expected: "",
			valid:    false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var s SqlString
			if err := s.Scan(tt.input); err != nil {
				t.Fatalf("Scan failed: %v", err)
			}
			if s.Valid != tt.valid {
				t.Errorf("expected valid=%v, got valid=%v", tt.valid, s.Valid)
			}
			if tt.valid && s.String() != tt.expected {
				t.Errorf("expected %q, got %q", tt.expected, s.String())
			}
		})
	}
}

func TestSqlString_JSON(t *testing.T) {
	tests := []struct {
		name           string
		inputValue     string
		expectedJSON   string
		expectedDecode string
	}{
		{
			name:           "simple string",
			inputValue:     "hello world",
			expectedJSON:   `"hello world"`, // plain text, not base64
			expectedDecode: "hello world",
		},
		{
			name:           "special characters",
			inputValue:     "test@#$%",
			expectedJSON:   `"test@#$%"`, // plain text, not base64
			expectedDecode: "test@#$%",
		},
		{
			name:           "unicode string",
			inputValue:     "Hello 世界",
			expectedJSON:   `"Hello 世界"`, // plain text, not base64
			expectedDecode: "Hello 世界",
		},
		{
			name:           "empty string",
			inputValue:     "",
			expectedJSON:   `""`,
			expectedDecode: "",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Test MarshalJSON
			s := NewSqlString(tt.inputValue)
			data, err := json.Marshal(s)
			if err != nil {
				t.Fatalf("Marshal failed: %v", err)
			}
			if string(data) != tt.expectedJSON {
				t.Errorf("Marshal: expected %s, got %s", tt.expectedJSON, string(data))
			}

			// Test UnmarshalJSON
			var s2 SqlString
			if err := json.Unmarshal(data, &s2); err != nil {
				t.Fatalf("Unmarshal failed: %v", err)
			}
			if !s2.Valid {
				t.Error("expected valid=true after unmarshal")
			}
			if s2.String() != tt.expectedDecode {
				t.Errorf("Unmarshal: expected %q, got %q", tt.expectedDecode, s2.String())
			}
		})
	}
}

func TestSqlString_JSON_Null(t *testing.T) {
	// Test null handling
	var s SqlString
	if err := json.Unmarshal([]byte("null"), &s); err != nil {
		t.Fatalf("Unmarshal null failed: %v", err)
	}
	if s.Valid {
		t.Error("expected invalid after unmarshaling null")
	}

	// Test marshal null
	data, err := json.Marshal(s)
	if err != nil {
		t.Fatalf("Marshal failed: %v", err)
	}
	if string(data) != "null" {
		t.Errorf("expected null, got %s", string(data))
	}
}

// TestSqlByteArray_Base64 tests SqlByteArray with base64 encoding/decoding
func TestSqlByteArray_Base64_Scan(t *testing.T) {
	tests := []struct {
		name     string
		input    interface{}
		expected []byte
		valid    bool
	}{
		{
			name:     "base64 encoded bytes from SQL",
			input:    "aGVsbG8gd29ybGQ=", // "hello world" in base64
			expected: []byte("hello world"),
			valid:    true,
		},
		{
			name:     "plain bytes fallback",
			input:    "plain text",
			expected: []byte("plain text"),
			valid:    true,
		},
		{
			name:     "bytes base64 encoded",
			input:    []byte("SGVsbG8gR29waGVy"), // "Hello Gopher" in base64
			expected: []byte("Hello Gopher"),
			valid:    true,
		},
		{
			name:     "bytes plain fallback",
			input:    []byte("raw bytes"),
			expected: []byte("raw bytes"),
			valid:    true,
		},
		{
			name:     "binary data",
			input:    "AQIDBA==", // []byte{1, 2, 3, 4} in base64
			expected: []byte{1, 2, 3, 4},
			valid:    true,
		},
		{
			name:     "nil value",
			input:    nil,
			expected: nil,
			valid:    false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var b SqlByteArray
			if err := b.Scan(tt.input); err != nil {
				t.Fatalf("Scan failed: %v", err)
			}
			if b.Valid != tt.valid {
				t.Errorf("expected valid=%v, got valid=%v", tt.valid, b.Valid)
			}
			if tt.valid {
				if string(b.Val) != string(tt.expected) {
					t.Errorf("expected %q, got %q", tt.expected, b.Val)
				}
			}
		})
	}
}

func TestSqlByteArray_Base64_JSON(t *testing.T) {
	tests := []struct {
		name           string
		inputValue     []byte
		expectedJSON   string
		expectedDecode []byte
	}{
		{
			name:           "text bytes",
			inputValue:     []byte("hello world"),
			expectedJSON:   `"aGVsbG8gd29ybGQ="`, // base64 encoded
			expectedDecode: []byte("hello world"),
		},
		{
			name:           "binary data",
			inputValue:     []byte{0x01, 0x02, 0x03, 0x04, 0xFF},
			expectedJSON:   `"AQIDBP8="`, // base64 encoded
			expectedDecode: []byte{0x01, 0x02, 0x03, 0x04, 0xFF},
		},
		{
			name:           "empty bytes",
			inputValue:     []byte{},
			expectedJSON:   `""`, // base64 of empty bytes
			expectedDecode: []byte{},
		},
		{
			name:           "unicode bytes",
			inputValue:     []byte("Hello 世界"),
			expectedJSON:   `"SGVsbG8g5LiW55WM"`, // base64 encoded
			expectedDecode: []byte("Hello 世界"),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Test MarshalJSON
			b := NewSqlByteArray(tt.inputValue)
			data, err := json.Marshal(b)
			if err != nil {
				t.Fatalf("Marshal failed: %v", err)
			}
			if string(data) != tt.expectedJSON {
				t.Errorf("Marshal: expected %s, got %s", tt.expectedJSON, string(data))
			}

			// Test UnmarshalJSON
			var b2 SqlByteArray
			if err := json.Unmarshal(data, &b2); err != nil {
				t.Fatalf("Unmarshal failed: %v", err)
			}
			if !b2.Valid {
				t.Error("expected valid=true after unmarshal")
			}
			if string(b2.Val) != string(tt.expectedDecode) {
				t.Errorf("Unmarshal: expected %v, got %v", tt.expectedDecode, b2.Val)
			}
		})
	}
}

func TestSqlByteArray_Base64_JSON_Null(t *testing.T) {
	// Test null handling
	var b SqlByteArray
	if err := json.Unmarshal([]byte("null"), &b); err != nil {
		t.Fatalf("Unmarshal null failed: %v", err)
	}
	if b.Valid {
		t.Error("expected invalid after unmarshaling null")
	}

	// Test marshal null
	data, err := json.Marshal(b)
	if err != nil {
		t.Fatalf("Marshal failed: %v", err)
	}
	if string(data) != "null" {
		t.Errorf("expected null, got %s", string(data))
	}
}

func TestSqlByteArray_Value(t *testing.T) {
	tests := []struct {
		name     string
		input    SqlByteArray
		expected interface{}
	}{
		{
			name:     "valid bytes",
			input:    NewSqlByteArray([]byte("test data")),
			expected: []byte("test data"),
		},
		{
			name:     "empty bytes",
			input:    NewSqlByteArray([]byte{}),
			expected: []byte{},
		},
		{
			name:     "invalid",
			input:    SqlByteArray{Valid: false},
			expected: nil,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			val, err := tt.input.Value()
			if err != nil {
				t.Fatalf("Value failed: %v", err)
			}
			if tt.expected == nil && val != nil {
				t.Errorf("expected nil, got %v", val)
			}
			if tt.expected != nil && val == nil {
				t.Errorf("expected %v, got nil", tt.expected)
			}
			if tt.expected != nil && val != nil {
				if string(val.([]byte)) != string(tt.expected.([]byte)) {
					t.Errorf("expected %v, got %v", tt.expected, val)
				}
			}
		})
	}
}

// TestSqlString_RoundTrip tests complete round-trip: Go -> JSON -> Go -> SQL -> Go
func TestSqlString_RoundTrip(t *testing.T) {
	original := "Test String with Special Chars: @#$%^&*()"

	// Go -> JSON
	s1 := NewSqlString(original)
	jsonData, err := json.Marshal(s1)
	if err != nil {
		t.Fatalf("Marshal failed: %v", err)
	}

	// JSON -> Go
	var s2 SqlString
	if err := json.Unmarshal(jsonData, &s2); err != nil {
		t.Fatalf("Unmarshal failed: %v", err)
	}

	// Go -> SQL (Value)
	_, err = s2.Value()
	if err != nil {
		t.Fatalf("Value failed: %v", err)
	}

	// SQL -> Go (Scan plain text)
	var s3 SqlString
	// Simulate SQL driver returning plain text value
	if err := s3.Scan(original); err != nil {
		t.Fatalf("Scan failed: %v", err)
	}

	// Verify round-trip
	if s3.String() != original {
		t.Errorf("Round-trip failed: expected %q, got %q", original, s3.String())
	}
}

// TestSqlByteArray_Base64_RoundTrip tests complete round-trip: Go -> JSON -> Go -> SQL -> Go
func TestSqlByteArray_Base64_RoundTrip(t *testing.T) {
	original := []byte{0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x20, 0xFF, 0xFE} // "Hello " + binary data

	// Go -> JSON
	b1 := NewSqlByteArray(original)
	jsonData, err := json.Marshal(b1)
	if err != nil {
		t.Fatalf("Marshal failed: %v", err)
	}

	// JSON -> Go
	var b2 SqlByteArray
	if err := json.Unmarshal(jsonData, &b2); err != nil {
		t.Fatalf("Unmarshal failed: %v", err)
	}

	// Go -> SQL (Value)
	_, err = b2.Value()
	if err != nil {
		t.Fatalf("Value failed: %v", err)
	}

	// SQL -> Go (Scan with base64)
	var b3 SqlByteArray
	// Simulate SQL driver returning base64 encoded value
	if err := b3.Scan("SGVsbG8g//4="); err != nil {
		t.Fatalf("Scan failed: %v", err)
	}

	// Verify round-trip
	if string(b3.Val) != string(original) {
		t.Errorf("Round-trip failed: expected %v, got %v", original, b3.Val)
	}
}