Mirror of https://github.com/bitechdev/ResolveSpec.git, synced 2026-01-30 22:44:25 +00:00
Compare commits
5 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 584bb9813d | |
| | 17239d1611 | |
| | defe27549b | |
| | f7725340a6 | |
| | 07016d1b73 | |
@@ -52,6 +52,10 @@ type PreloadOption struct {
 	PrimaryKey string `json:"primary_key"` // Primary key of the related table
 	RelatedKey string `json:"related_key"` // For child tables: column in child that references parent
 	ForeignKey string `json:"foreign_key"` // For parent tables: column in current table that references parent
+
+	// Custom SQL JOINs from XFiles - used when preload needs additional joins
+	SqlJoins    []string `json:"sql_joins"`    // Custom SQL JOIN clauses
+	JoinAliases []string `json:"join_aliases"` // Extracted table aliases from SqlJoins for validation
 }
 
 type FilterOption struct {
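For orientation, here is a minimal sketch of how the new fields might be populated. The field names and the pkg/common import path appear elsewhere in this diff; the concrete values and the standalone main are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/bitechdev/ResolveSpec/pkg/common"
)

func main() {
	// Illustrative values; only the field names come from the diff above.
	preload := common.PreloadOption{
		Relation:   "users",
		PrimaryKey: "id",
		SqlJoins: []string{
			"LEFT JOIN departments d ON d.id = users.department_id",
		},
		JoinAliases: []string{"d"},
	}
	fmt.Printf("%+v\n", preload)
}
```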
@@ -272,13 +272,29 @@ func (v *ColumnValidator) FilterRequestOptions(options RequestOptions) RequestOp
 		filteredPreload.Columns = v.FilterValidColumns(preload.Columns)
 		filteredPreload.OmitColumns = v.FilterValidColumns(preload.OmitColumns)
 
+		// Preserve SqlJoins and JoinAliases for preloads with custom joins
+		filteredPreload.SqlJoins = preload.SqlJoins
+		filteredPreload.JoinAliases = preload.JoinAliases
+
 		// Filter preload filters
 		validPreloadFilters := make([]FilterOption, 0, len(preload.Filters))
 		for _, filter := range preload.Filters {
 			if v.IsValidColumn(filter.Column) {
 				validPreloadFilters = append(validPreloadFilters, filter)
 			} else {
-				logger.Warn("Invalid column in preload '%s' filter '%s' removed", preload.Relation, filter.Column)
+				// Check if the filter column references a joined table alias
+				foundJoin := false
+				for _, alias := range preload.JoinAliases {
+					if strings.Contains(filter.Column, alias) {
+						foundJoin = true
+						break
+					}
+				}
+				if foundJoin {
+					validPreloadFilters = append(validPreloadFilters, filter)
+				} else {
+					logger.Warn("Invalid column in preload '%s' filter '%s' removed", preload.Relation, filter.Column)
+				}
 			}
 		}
 		filteredPreload.Filters = validPreloadFilters
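The new rule in plain terms: an unknown filter column is no longer dropped outright; it survives if it mentions one of the preload's join aliases. A standalone sketch of that decision (the helper name and the isValid callback are hypothetical, not from the repository):

```go
package main

import (
	"fmt"
	"strings"
)

// keepFilterColumn mirrors the validation added above: keep known columns,
// and keep unknown columns only when they reference a join alias.
func keepFilterColumn(column string, isValid func(string) bool, joinAliases []string) bool {
	if isValid(column) {
		return true
	}
	for _, alias := range joinAliases {
		if strings.Contains(column, alias) {
			return true
		}
	}
	return false
}

func main() {
	known := map[string]bool{"name": true, "department_id": true}
	isValid := func(c string) bool { return known[c] }
	aliases := []string{"d", "r"}

	fmt.Println(keepFilterColumn("name", isValid, aliases))     // true: a known column
	fmt.Println(keepFilterColumn("d.active", isValid, aliases)) // true: references join alias "d"
	fmt.Println(keepFilterColumn("x.bogus", isValid, aliases))  // false: dropped with a warning in the real code
}
```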
@@ -221,7 +221,10 @@ func (cc *ConnectionConfig) ApplyDefaults(global *ManagerConfig) {
 		cc.ConnectTimeout = 10 * time.Second
 	}
 	if cc.QueryTimeout == 0 {
-		cc.QueryTimeout = 30 * time.Second
+		cc.QueryTimeout = 2 * time.Minute // Default to 2 minutes
+	} else if cc.QueryTimeout < 2*time.Minute {
+		// Enforce minimum of 2 minutes
+		cc.QueryTimeout = 2 * time.Minute
 	}
 
 	// Default ORM
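The effect of the new default and minimum, shown as a small standalone sketch (clampQueryTimeout is a made-up helper, not the real ApplyDefaults):

```go
package main

import (
	"fmt"
	"time"
)

// clampQueryTimeout reproduces the rule added above: unset values default to
// 2 minutes, and anything below 2 minutes is raised to 2 minutes.
func clampQueryTimeout(d time.Duration) time.Duration {
	if d == 0 {
		return 2 * time.Minute
	}
	if d < 2*time.Minute {
		return 2 * time.Minute
	}
	return d
}

func main() {
	fmt.Println(clampQueryTimeout(0))                // 2m0s (default)
	fmt.Println(clampQueryTimeout(30 * time.Second)) // 2m0s (raised to the minimum)
	fmt.Println(clampQueryTimeout(5 * time.Minute))  // 5m0s (left as configured)
}
```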
@@ -325,14 +328,29 @@ func (cc *ConnectionConfig) buildPostgresDSN() string {
 		dsn += fmt.Sprintf(" search_path=%s", cc.Schema)
 	}
 
+	// Add statement_timeout for query execution timeout (in milliseconds)
+	if cc.QueryTimeout > 0 {
+		timeoutMs := int(cc.QueryTimeout.Milliseconds())
+		dsn += fmt.Sprintf(" statement_timeout=%d", timeoutMs)
+	}
+
 	return dsn
 }
 
 func (cc *ConnectionConfig) buildSQLiteDSN() string {
-	if cc.FilePath != "" {
-		return cc.FilePath
+	filepath := cc.FilePath
+	if filepath == "" {
+		filepath = ":memory:"
 	}
-	return ":memory:"
+
+	// Add query parameters for timeouts
+	// Note: SQLite driver supports _timeout parameter (in milliseconds)
+	if cc.QueryTimeout > 0 {
+		timeoutMs := int(cc.QueryTimeout.Milliseconds())
+		filepath += fmt.Sprintf("?_timeout=%d", timeoutMs)
+	}
+
+	return filepath
 }
 
 func (cc *ConnectionConfig) buildMSSQLDSN() string {
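To illustrate what the Postgres and SQLite changes append (only the Sprintf formats are taken from the diff; the base DSN values are invented), a 2-minute QueryTimeout becomes statement_timeout=120000 and ?_timeout=120000 respectively:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	queryTimeout := 2 * time.Minute
	timeoutMs := int(queryTimeout.Milliseconds())

	// Postgres: statement_timeout is appended as another space-separated key=value pair.
	pgDSN := "host=localhost dbname=app" // example base DSN, not from the repo
	pgDSN += fmt.Sprintf(" statement_timeout=%d", timeoutMs)
	fmt.Println(pgDSN) // host=localhost dbname=app statement_timeout=120000

	// SQLite: the _timeout query parameter is appended to the file path (or :memory:).
	sqliteDSN := ":memory:"
	sqliteDSN += fmt.Sprintf("?_timeout=%d", timeoutMs)
	fmt.Println(sqliteDSN) // :memory:?_timeout=120000
}
```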
@@ -344,6 +362,24 @@ func (cc *ConnectionConfig) buildMSSQLDSN() string {
 		dsn += fmt.Sprintf("&schema=%s", cc.Schema)
 	}
 
+	// Add connection timeout (in seconds)
+	if cc.ConnectTimeout > 0 {
+		timeoutSec := int(cc.ConnectTimeout.Seconds())
+		dsn += fmt.Sprintf("&connection timeout=%d", timeoutSec)
+	}
+
+	// Add dial timeout for TCP connection (in seconds)
+	if cc.ConnectTimeout > 0 {
+		dialTimeoutSec := int(cc.ConnectTimeout.Seconds())
+		dsn += fmt.Sprintf("&dial timeout=%d", dialTimeoutSec)
+	}
+
+	// Add read timeout (in seconds) - enforces timeout for reading data
+	if cc.QueryTimeout > 0 {
+		readTimeoutSec := int(cc.QueryTimeout.Seconds())
+		dsn += fmt.Sprintf("&read timeout=%d", readTimeoutSec)
+	}
+
 	return dsn
 }
 
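And for MSSQL, the same settings land as URL parameters; with a 10-second ConnectTimeout and a 2-minute QueryTimeout the appended tail looks like this (base connection string invented for the example):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	connectTimeout := 10 * time.Second
	queryTimeout := 2 * time.Minute

	// Example base DSN; only the appended parameters come from the diff.
	dsn := "sqlserver://user:pass@localhost:1433?database=app"
	dsn += fmt.Sprintf("&connection timeout=%d", int(connectTimeout.Seconds()))
	dsn += fmt.Sprintf("&dial timeout=%d", int(connectTimeout.Seconds()))
	dsn += fmt.Sprintf("&read timeout=%d", int(queryTimeout.Seconds()))

	fmt.Println(dsn)
	// sqlserver://user:pass@localhost:1433?database=app&connection timeout=10&dial timeout=10&read timeout=120
}
```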
@@ -76,8 +76,12 @@ func (p *SQLiteProvider) Connect(ctx context.Context, cfg ConnectionConfig) erro
 		// Don't fail connection if WAL mode cannot be enabled
 	}
 
-	// Set busy timeout to handle locked database
-	_, err = db.ExecContext(ctx, "PRAGMA busy_timeout=5000")
+	// Set busy timeout to handle locked database (minimum 2 minutes = 120000ms)
+	busyTimeout := cfg.GetQueryTimeout().Milliseconds()
+	if busyTimeout < 120000 {
+		busyTimeout = 120000 // Enforce minimum of 2 minutes
+	}
+	_, err = db.ExecContext(ctx, fmt.Sprintf("PRAGMA busy_timeout=%d", busyTimeout))
 	if err != nil {
 		if cfg.GetEnableLogging() {
 			logger.Warn("Failed to set busy timeout for SQLite", "error", err)
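A quick sketch of the PRAGMA this now issues when the configured query timeout is below the floor:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirror of the clamp added above: never below 120000 ms.
	queryTimeout := 30 * time.Second // example configured value
	busyTimeout := queryTimeout.Milliseconds()
	if busyTimeout < 120000 {
		busyTimeout = 120000
	}
	fmt.Printf("PRAGMA busy_timeout=%d\n", busyTimeout) // PRAGMA busy_timeout=120000
}
```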
@@ -882,6 +882,15 @@ func (h *Handler) applyPreloadWithRecursion(query common.SelectQuery, preload co
 		}
 	}
 
+	// Apply custom SQL joins from XFiles
+	if len(preload.SqlJoins) > 0 {
+		logger.Debug("Applying %d SQL joins to preload %s", len(preload.SqlJoins), preload.Relation)
+		for _, joinClause := range preload.SqlJoins {
+			sq = sq.Join(joinClause)
+			logger.Debug("Applied SQL join to preload %s: %s", preload.Relation, joinClause)
+		}
+	}
+
 	// Apply filters
 	if len(preload.Filters) > 0 {
 		for _, filter := range preload.Filters {
@@ -1088,6 +1088,32 @@ func (h *Handler) addXFilesPreload(xfile *XFiles, options *ExtendedRequestOption
 		logger.Debug("X-Files: Set foreign key for %s: %s", relationPath, xfile.ForeignKey)
 	}
 
+	// Transfer SqlJoins from XFiles to PreloadOption
+	if len(xfile.SqlJoins) > 0 {
+		preloadOpt.SqlJoins = make([]string, 0, len(xfile.SqlJoins))
+		preloadOpt.JoinAliases = make([]string, 0, len(xfile.SqlJoins))
+
+		for _, joinClause := range xfile.SqlJoins {
+			// Sanitize the join clause
+			sanitizedJoin := common.SanitizeWhereClause(joinClause, "", nil)
+			if sanitizedJoin == "" {
+				logger.Warn("X-Files: SqlJoin failed sanitization for %s: %s", relationPath, joinClause)
+				continue
+			}
+
+			preloadOpt.SqlJoins = append(preloadOpt.SqlJoins, sanitizedJoin)
+
+			// Extract join alias for validation
+			alias := extractJoinAlias(sanitizedJoin)
+			if alias != "" {
+				preloadOpt.JoinAliases = append(preloadOpt.JoinAliases, alias)
+				logger.Debug("X-Files: Extracted join alias for %s: %s", relationPath, alias)
+			}
+		}
+
+		logger.Debug("X-Files: Added %d SQL joins to preload %s", len(preloadOpt.SqlJoins), relationPath)
+	}
+
 	// Add the preload option
 	options.Preload = append(options.Preload, preloadOpt)
@@ -2,6 +2,8 @@ package restheadspec
 
 import (
 	"testing"
+
+	"github.com/bitechdev/ResolveSpec/pkg/common"
 )
 
 func TestDecodeHeaderValue(t *testing.T) {
@@ -37,6 +39,121 @@
 	}
 }
 
+func TestAddXFilesPreload_WithSqlJoins(t *testing.T) {
+	handler := &Handler{}
+	options := &ExtendedRequestOptions{
+		RequestOptions: common.RequestOptions{
+			Preload: make([]common.PreloadOption, 0),
+		},
+	}
+
+	// Create an XFiles with SqlJoins
+	xfile := &XFiles{
+		TableName: "users",
+		SqlJoins: []string{
+			"LEFT JOIN departments d ON d.id = users.department_id",
+			"INNER JOIN roles r ON r.id = users.role_id",
+		},
+		FilterFields: []struct {
+			Field    string `json:"field"`
+			Value    string `json:"value"`
+			Operator string `json:"operator"`
+		}{
+			{Field: "d.active", Value: "true", Operator: "eq"},
+			{Field: "r.name", Value: "admin", Operator: "eq"},
+		},
+	}
+
+	// Add the XFiles preload
+	handler.addXFilesPreload(xfile, options, "")
+
+	// Verify that a preload was added
+	if len(options.Preload) != 1 {
+		t.Fatalf("Expected 1 preload, got %d", len(options.Preload))
+	}
+
+	preload := options.Preload[0]
+
+	// Verify relation name
+	if preload.Relation != "users" {
+		t.Errorf("Expected relation 'users', got '%s'", preload.Relation)
+	}
+
+	// Verify SqlJoins were transferred
+	if len(preload.SqlJoins) != 2 {
+		t.Fatalf("Expected 2 SQL joins, got %d", len(preload.SqlJoins))
+	}
+
+	// Verify JoinAliases were extracted
+	if len(preload.JoinAliases) != 2 {
+		t.Fatalf("Expected 2 join aliases, got %d", len(preload.JoinAliases))
+	}
+
+	// Verify the aliases are correct
+	expectedAliases := []string{"d", "r"}
+	for i, expected := range expectedAliases {
+		if preload.JoinAliases[i] != expected {
+			t.Errorf("Expected alias '%s', got '%s'", expected, preload.JoinAliases[i])
+		}
+	}
+
+	// Verify filters were added
+	if len(preload.Filters) != 2 {
+		t.Fatalf("Expected 2 filters, got %d", len(preload.Filters))
+	}
+
+	// Verify filter columns reference joined tables
+	if preload.Filters[0].Column != "d.active" {
+		t.Errorf("Expected filter column 'd.active', got '%s'", preload.Filters[0].Column)
+	}
+	if preload.Filters[1].Column != "r.name" {
+		t.Errorf("Expected filter column 'r.name', got '%s'", preload.Filters[1].Column)
+	}
+}
+
+func TestExtractJoinAlias(t *testing.T) {
+	tests := []struct {
+		name       string
+		joinClause string
+		expected   string
+	}{
+		{
+			name:       "LEFT JOIN with alias",
+			joinClause: "LEFT JOIN departments d ON d.id = users.department_id",
+			expected:   "d",
+		},
+		{
+			name:       "INNER JOIN with AS keyword",
+			joinClause: "INNER JOIN users AS u ON u.id = orders.user_id",
+			expected:   "u",
+		},
+		{
+			name:       "JOIN without alias",
+			joinClause: "JOIN roles ON roles.id = users.role_id",
+			expected:   "",
+		},
+		{
+			name:       "Complex join with multiple conditions",
+			joinClause: "LEFT OUTER JOIN products p ON p.id = items.product_id AND p.active = true",
+			expected:   "p",
+		},
+		{
+			name:       "Invalid join (no ON clause)",
+			joinClause: "LEFT JOIN departments",
+			expected:   "",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := extractJoinAlias(tt.joinClause)
+			if result != tt.expected {
+				t.Errorf("Expected alias '%s', got '%s'", tt.expected, result)
+			}
+		})
+	}
+}
+
 // Note: The following functions are unexported (lowercase) and cannot be tested directly:
 // - parseSelectFields
 // - parseFieldFilter
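extractJoinAlias itself is not part of this diff, so as a rough guide the sketch below re-implements a helper with the behaviour these tests expect; the name extractJoinAliasSketch and every detail of the parsing are assumptions, not the repository's code.

```go
package main

import (
	"fmt"
	"strings"
)

// extractJoinAliasSketch is a hypothetical stand-in for the unexported
// extractJoinAlias helper, written only to satisfy the test cases above.
func extractJoinAliasSketch(join string) string {
	fields := strings.Fields(join)

	// Locate the JOIN keyword; the table name follows it.
	joinIdx := -1
	for i, f := range fields {
		if strings.EqualFold(f, "JOIN") {
			joinIdx = i
			break
		}
	}
	// Need at least: JOIN <table> <alias-or-ON> ...
	if joinIdx < 0 || joinIdx+2 >= len(fields) {
		return ""
	}

	// A join without an ON clause is treated as invalid.
	hasOn := false
	for _, f := range fields[joinIdx:] {
		if strings.EqualFold(f, "ON") {
			hasOn = true
			break
		}
	}
	if !hasOn {
		return ""
	}

	next := fields[joinIdx+2]
	if strings.EqualFold(next, "AS") && joinIdx+3 < len(fields) {
		return fields[joinIdx+3] // "... AS u ON ..." -> "u"
	}
	if strings.EqualFold(next, "ON") {
		return "" // "JOIN roles ON ..." has no alias
	}
	return next // "LEFT JOIN departments d ON ..." -> "d"
}

func main() {
	fmt.Println(extractJoinAliasSketch("LEFT JOIN departments d ON d.id = users.department_id")) // d
	fmt.Println(extractJoinAliasSketch("INNER JOIN users AS u ON u.id = orders.user_id"))        // u
	fmt.Println(extractJoinAliasSketch("JOIN roles ON roles.id = users.role_id"))                // (empty)
	fmt.Println(extractJoinAliasSketch("LEFT JOIN departments"))                                 // (empty)
}
```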
@@ -411,7 +411,9 @@ func newInstance(cfg Config) (*serverInstance, error) {
 		return nil, fmt.Errorf("handler cannot be nil")
 	}
 
-	// Set default timeouts
+	// Set default timeouts with minimum of 10 minutes for connection timeouts
+	minConnectionTimeout := 10 * time.Minute
+
 	if cfg.ShutdownTimeout == 0 {
 		cfg.ShutdownTimeout = 30 * time.Second
 	}
@@ -419,13 +421,22 @@
 		cfg.DrainTimeout = 25 * time.Second
 	}
 	if cfg.ReadTimeout == 0 {
-		cfg.ReadTimeout = 15 * time.Second
+		cfg.ReadTimeout = minConnectionTimeout
+	} else if cfg.ReadTimeout < minConnectionTimeout {
+		// Enforce minimum of 10 minutes
+		cfg.ReadTimeout = minConnectionTimeout
 	}
 	if cfg.WriteTimeout == 0 {
-		cfg.WriteTimeout = 15 * time.Second
+		cfg.WriteTimeout = minConnectionTimeout
+	} else if cfg.WriteTimeout < minConnectionTimeout {
+		// Enforce minimum of 10 minutes
+		cfg.WriteTimeout = minConnectionTimeout
 	}
 	if cfg.IdleTimeout == 0 {
-		cfg.IdleTimeout = 60 * time.Second
+		cfg.IdleTimeout = minConnectionTimeout
+	} else if cfg.IdleTimeout < minConnectionTimeout {
+		// Enforce minimum of 10 minutes
+		cfg.IdleTimeout = minConnectionTimeout
 	}
 
 	addr := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port)
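For context, Read/Write/Idle timeouts of this kind are usually handed to the standard library HTTP server; that wiring is not shown in this diff, so the snippet below is only a sketch of what the 10-minute floor means for net/http:

```go
package main

import (
	"net/http"
	"time"
)

func main() {
	minConnectionTimeout := 10 * time.Minute

	// Sketch: the enforced minimums applied to a net/http server.
	srv := &http.Server{
		Addr:         "127.0.0.1:8080",
		ReadTimeout:  minConnectionTimeout, // previously defaulted to 15s
		WriteTimeout: minConnectionTimeout, // previously defaulted to 15s
		IdleTimeout:  minConnectionTimeout, // previously defaulted to 60s
	}
	_ = srv // srv.ListenAndServe() would start serving with these limits
}
```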
@@ -4,6 +4,7 @@ package spectypes
 import (
 	"database/sql"
 	"database/sql/driver"
+	"encoding/base64"
 	"encoding/json"
 	"fmt"
 	"reflect"
@@ -60,7 +61,33 @@ func (n *SqlNull[T]) Scan(value any) error {
 		return nil
 	}
 
-	// Try standard sql.Null[T] first.
+	// Check if T is []byte, and decode base64 if applicable
+	// Do this BEFORE trying sql.Null to ensure base64 is handled
+	var zero T
+	if _, ok := any(zero).([]byte); ok {
+		// For []byte types, try to decode from base64
+		var strVal string
+		switch v := value.(type) {
+		case string:
+			strVal = v
+		case []byte:
+			strVal = string(v)
+		default:
+			strVal = fmt.Sprintf("%v", value)
+		}
+		// Try base64 decode
+		if decoded, err := base64.StdEncoding.DecodeString(strVal); err == nil {
+			n.Val = any(decoded).(T)
+			n.Valid = true
+			return nil
+		}
+		// Fallback to raw bytes
+		n.Val = any([]byte(strVal)).(T)
+		n.Valid = true
+		return nil
+	}
+
+	// Try standard sql.Null[T] for other types.
 	var sqlNull sql.Null[T]
 	if err := sqlNull.Scan(value); err == nil {
 		n.Val = sqlNull.V
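In practice the new branch only changes the []byte specialisation: base64 input is decoded, anything else is kept as raw bytes, and other type parameters still go through sql.Null[T]. A usage sketch (the pkg/spectypes import path is assumed; the diff only shows the package name, and error handling is omitted for brevity):

```go
package main

import (
	"fmt"

	// Assumed import path; the diff only shows the package name "spectypes".
	"github.com/bitechdev/ResolveSpec/pkg/spectypes"
)

func main() {
	// []byte specialisation: a base64 payload from the driver is decoded...
	var b spectypes.SqlByteArray
	_ = b.Scan("aGVsbG8gd29ybGQ=")        // "hello world" in base64
	fmt.Printf("%s %v\n", b.Val, b.Valid) // hello world true

	// ...and a non-base64 value is kept as raw bytes.
	var b2 spectypes.SqlByteArray
	_ = b2.Scan("plain text")
	fmt.Printf("%s %v\n", b2.Val, b2.Valid) // plain text true

	// Non-[]byte specialisations still go through sql.Null[T].
	var s spectypes.SqlString
	_ = s.Scan("aGVsbG8gd29ybGQ=")
	fmt.Println(s.Val) // the literal string, not decoded
}
```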
@@ -122,6 +149,9 @@ func (n *SqlNull[T]) FromString(s string) error {
 			n.Val = any(u).(T)
 			n.Valid = true
 		}
+	case []byte:
+		n.Val = any([]byte(s)).(T)
+		n.Valid = true
 	case string:
 		n.Val = any(s).(T)
 		n.Valid = true
@@ -149,6 +179,14 @@ func (n SqlNull[T]) MarshalJSON() ([]byte, error) {
 	if !n.Valid {
 		return []byte("null"), nil
 	}
+
+	// Check if T is []byte, and encode to base64
+	if _, ok := any(n.Val).([]byte); ok {
+		// Encode []byte as base64
+		encoded := base64.StdEncoding.EncodeToString(any(n.Val).([]byte))
+		return json.Marshal(encoded)
+	}
+
 	return json.Marshal(n.Val)
 }
 
@@ -160,8 +198,25 @@ func (n *SqlNull[T]) UnmarshalJSON(b []byte) error {
 		return nil
 	}
 
-	// Try direct unmarshal.
+	// Check if T is []byte, and decode from base64
 	var val T
+	if _, ok := any(val).([]byte); ok {
+		// Unmarshal as string first (JSON representation)
+		var s string
+		if err := json.Unmarshal(b, &s); err == nil {
+			// Decode from base64
+			if decoded, err := base64.StdEncoding.DecodeString(s); err == nil {
+				n.Val = any(decoded).(T)
+				n.Valid = true
+				return nil
+			}
+			// Fallback to raw string as bytes
+			n.Val = any([]byte(s)).(T)
+			n.Valid = true
+			return nil
+		}
+	}
+
 	if err := json.Unmarshal(b, &val); err == nil {
 		n.Val = val
 		n.Valid = true
@@ -271,13 +326,14 @@ func (n SqlNull[T]) UUID() uuid.UUID {
 
 // Type aliases for common types.
 type (
-	SqlInt16   = SqlNull[int16]
-	SqlInt32   = SqlNull[int32]
-	SqlInt64   = SqlNull[int64]
-	SqlFloat64 = SqlNull[float64]
-	SqlBool    = SqlNull[bool]
-	SqlString  = SqlNull[string]
-	SqlUUID    = SqlNull[uuid.UUID]
+	SqlInt16     = SqlNull[int16]
+	SqlInt32     = SqlNull[int32]
+	SqlInt64     = SqlNull[int64]
+	SqlFloat64   = SqlNull[float64]
+	SqlBool      = SqlNull[bool]
+	SqlString    = SqlNull[string]
+	SqlByteArray = SqlNull[[]byte]
+	SqlUUID      = SqlNull[uuid.UUID]
 )
 
 // SqlTimeStamp - Timestamp with custom formatting (YYYY-MM-DDTHH:MM:SS).
@@ -581,6 +637,10 @@ func NewSqlString(v string) SqlString {
 	return SqlString{Val: v, Valid: true}
 }
 
+func NewSqlByteArray(v []byte) SqlByteArray {
+	return SqlByteArray{Val: v, Valid: true}
+}
+
 func NewSqlUUID(v uuid.UUID) SqlUUID {
 	return SqlUUID{Val: v, Valid: true}
 }
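With the SqlByteArray alias and constructor in place, a JSON round-trip behaves as the tests below verify: []byte marshals to a base64 string and unmarshals back to the original bytes, while SqlString stays plain text. A short sketch (import path assumed as before):

```go
package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path; the diff only shows the package name "spectypes".
	"github.com/bitechdev/ResolveSpec/pkg/spectypes"
)

func main() {
	// []byte values marshal to a base64 JSON string...
	b := spectypes.NewSqlByteArray([]byte{0x01, 0x02, 0x03, 0x04, 0xFF})
	out, _ := json.Marshal(b)
	fmt.Println(string(out)) // "AQIDBP8="

	// ...and unmarshal back to the original bytes.
	var back spectypes.SqlByteArray
	_ = json.Unmarshal(out, &back)
	fmt.Println(back.Val, back.Valid) // [1 2 3 4 255] true

	// Plain strings are unaffected: no base64 round-trip is applied.
	s := spectypes.NewSqlString("hello world")
	out2, _ := json.Marshal(s)
	fmt.Println(string(out2)) // "hello world"
}
```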
@@ -565,3 +565,394 @@ func TestTryIfInt64(t *testing.T) {
 		})
 	}
 }
+
+// TestSqlString tests SqlString without base64 (plain text)
+func TestSqlString_Scan(t *testing.T) {
+	tests := []struct {
+		name     string
+		input    interface{}
+		expected string
+		valid    bool
+	}{
+		{
+			name:     "plain string",
+			input:    "hello world",
+			expected: "hello world",
+			valid:    true,
+		},
+		{
+			name:     "plain text",
+			input:    "plain text",
+			expected: "plain text",
+			valid:    true,
+		},
+		{
+			name:     "bytes as string",
+			input:    []byte("raw bytes"),
+			expected: "raw bytes",
+			valid:    true,
+		},
+		{
+			name:     "nil value",
+			input:    nil,
+			expected: "",
+			valid:    false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var s SqlString
+			if err := s.Scan(tt.input); err != nil {
+				t.Fatalf("Scan failed: %v", err)
+			}
+			if s.Valid != tt.valid {
+				t.Errorf("expected valid=%v, got valid=%v", tt.valid, s.Valid)
+			}
+			if tt.valid && s.String() != tt.expected {
+				t.Errorf("expected %q, got %q", tt.expected, s.String())
+			}
+		})
+	}
+}
+
+func TestSqlString_JSON(t *testing.T) {
+	tests := []struct {
+		name           string
+		inputValue     string
+		expectedJSON   string
+		expectedDecode string
+	}{
+		{
+			name:           "simple string",
+			inputValue:     "hello world",
+			expectedJSON:   `"hello world"`, // plain text, not base64
+			expectedDecode: "hello world",
+		},
+		{
+			name:           "special characters",
+			inputValue:     "test@#$%",
+			expectedJSON:   `"test@#$%"`, // plain text, not base64
+			expectedDecode: "test@#$%",
+		},
+		{
+			name:           "unicode string",
+			inputValue:     "Hello 世界",
+			expectedJSON:   `"Hello 世界"`, // plain text, not base64
+			expectedDecode: "Hello 世界",
+		},
+		{
+			name:           "empty string",
+			inputValue:     "",
+			expectedJSON:   `""`,
+			expectedDecode: "",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Test MarshalJSON
+			s := NewSqlString(tt.inputValue)
+			data, err := json.Marshal(s)
+			if err != nil {
+				t.Fatalf("Marshal failed: %v", err)
+			}
+			if string(data) != tt.expectedJSON {
+				t.Errorf("Marshal: expected %s, got %s", tt.expectedJSON, string(data))
+			}
+
+			// Test UnmarshalJSON
+			var s2 SqlString
+			if err := json.Unmarshal(data, &s2); err != nil {
+				t.Fatalf("Unmarshal failed: %v", err)
+			}
+			if !s2.Valid {
+				t.Error("expected valid=true after unmarshal")
+			}
+			if s2.String() != tt.expectedDecode {
+				t.Errorf("Unmarshal: expected %q, got %q", tt.expectedDecode, s2.String())
+			}
+		})
+	}
+}
+
+func TestSqlString_JSON_Null(t *testing.T) {
+	// Test null handling
+	var s SqlString
+	if err := json.Unmarshal([]byte("null"), &s); err != nil {
+		t.Fatalf("Unmarshal null failed: %v", err)
+	}
+	if s.Valid {
+		t.Error("expected invalid after unmarshaling null")
+	}
+
+	// Test marshal null
+	data, err := json.Marshal(s)
+	if err != nil {
+		t.Fatalf("Marshal failed: %v", err)
+	}
+	if string(data) != "null" {
+		t.Errorf("expected null, got %s", string(data))
+	}
+}
+
+// TestSqlByteArray_Base64 tests SqlByteArray with base64 encoding/decoding
+func TestSqlByteArray_Base64_Scan(t *testing.T) {
+	tests := []struct {
+		name     string
+		input    interface{}
+		expected []byte
+		valid    bool
+	}{
+		{
+			name:     "base64 encoded bytes from SQL",
+			input:    "aGVsbG8gd29ybGQ=", // "hello world" in base64
+			expected: []byte("hello world"),
+			valid:    true,
+		},
+		{
+			name:     "plain bytes fallback",
+			input:    "plain text",
+			expected: []byte("plain text"),
+			valid:    true,
+		},
+		{
+			name:     "bytes base64 encoded",
+			input:    []byte("SGVsbG8gR29waGVy"), // "Hello Gopher" in base64
+			expected: []byte("Hello Gopher"),
+			valid:    true,
+		},
+		{
+			name:     "bytes plain fallback",
+			input:    []byte("raw bytes"),
+			expected: []byte("raw bytes"),
+			valid:    true,
+		},
+		{
+			name:     "binary data",
+			input:    "AQIDBA==", // []byte{1, 2, 3, 4} in base64
+			expected: []byte{1, 2, 3, 4},
+			valid:    true,
+		},
+		{
+			name:     "nil value",
+			input:    nil,
+			expected: nil,
+			valid:    false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var b SqlByteArray
+			if err := b.Scan(tt.input); err != nil {
+				t.Fatalf("Scan failed: %v", err)
+			}
+			if b.Valid != tt.valid {
+				t.Errorf("expected valid=%v, got valid=%v", tt.valid, b.Valid)
+			}
+			if tt.valid {
+				if string(b.Val) != string(tt.expected) {
+					t.Errorf("expected %q, got %q", tt.expected, b.Val)
+				}
+			}
+		})
+	}
+}
+
+func TestSqlByteArray_Base64_JSON(t *testing.T) {
+	tests := []struct {
+		name           string
+		inputValue     []byte
+		expectedJSON   string
+		expectedDecode []byte
+	}{
+		{
+			name:           "text bytes",
+			inputValue:     []byte("hello world"),
+			expectedJSON:   `"aGVsbG8gd29ybGQ="`, // base64 encoded
+			expectedDecode: []byte("hello world"),
+		},
+		{
+			name:           "binary data",
+			inputValue:     []byte{0x01, 0x02, 0x03, 0x04, 0xFF},
+			expectedJSON:   `"AQIDBP8="`, // base64 encoded
+			expectedDecode: []byte{0x01, 0x02, 0x03, 0x04, 0xFF},
+		},
+		{
+			name:           "empty bytes",
+			inputValue:     []byte{},
+			expectedJSON:   `""`, // base64 of empty bytes
+			expectedDecode: []byte{},
+		},
+		{
+			name:           "unicode bytes",
+			inputValue:     []byte("Hello 世界"),
+			expectedJSON:   `"SGVsbG8g5LiW55WM"`, // base64 encoded
+			expectedDecode: []byte("Hello 世界"),
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Test MarshalJSON
+			b := NewSqlByteArray(tt.inputValue)
+			data, err := json.Marshal(b)
+			if err != nil {
+				t.Fatalf("Marshal failed: %v", err)
+			}
+			if string(data) != tt.expectedJSON {
+				t.Errorf("Marshal: expected %s, got %s", tt.expectedJSON, string(data))
+			}
+
+			// Test UnmarshalJSON
+			var b2 SqlByteArray
+			if err := json.Unmarshal(data, &b2); err != nil {
+				t.Fatalf("Unmarshal failed: %v", err)
+			}
+			if !b2.Valid {
+				t.Error("expected valid=true after unmarshal")
+			}
+			if string(b2.Val) != string(tt.expectedDecode) {
+				t.Errorf("Unmarshal: expected %v, got %v", tt.expectedDecode, b2.Val)
+			}
+		})
+	}
+}
+
+func TestSqlByteArray_Base64_JSON_Null(t *testing.T) {
+	// Test null handling
+	var b SqlByteArray
+	if err := json.Unmarshal([]byte("null"), &b); err != nil {
+		t.Fatalf("Unmarshal null failed: %v", err)
+	}
+	if b.Valid {
+		t.Error("expected invalid after unmarshaling null")
+	}
+
+	// Test marshal null
+	data, err := json.Marshal(b)
+	if err != nil {
+		t.Fatalf("Marshal failed: %v", err)
+	}
+	if string(data) != "null" {
+		t.Errorf("expected null, got %s", string(data))
+	}
+}
+
+func TestSqlByteArray_Value(t *testing.T) {
+	tests := []struct {
+		name     string
+		input    SqlByteArray
+		expected interface{}
+	}{
+		{
+			name:     "valid bytes",
+			input:    NewSqlByteArray([]byte("test data")),
+			expected: []byte("test data"),
+		},
+		{
+			name:     "empty bytes",
+			input:    NewSqlByteArray([]byte{}),
+			expected: []byte{},
+		},
+		{
+			name:     "invalid",
+			input:    SqlByteArray{Valid: false},
+			expected: nil,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			val, err := tt.input.Value()
+			if err != nil {
+				t.Fatalf("Value failed: %v", err)
+			}
+			if tt.expected == nil && val != nil {
+				t.Errorf("expected nil, got %v", val)
+			}
+			if tt.expected != nil && val == nil {
+				t.Errorf("expected %v, got nil", tt.expected)
+			}
+			if tt.expected != nil && val != nil {
+				if string(val.([]byte)) != string(tt.expected.([]byte)) {
+					t.Errorf("expected %v, got %v", tt.expected, val)
+				}
+			}
+		})
+	}
+}
+
+// TestSqlString_RoundTrip tests complete round-trip: Go -> JSON -> Go -> SQL -> Go
+func TestSqlString_RoundTrip(t *testing.T) {
+	original := "Test String with Special Chars: @#$%^&*()"
+
+	// Go -> JSON
+	s1 := NewSqlString(original)
+	jsonData, err := json.Marshal(s1)
+	if err != nil {
+		t.Fatalf("Marshal failed: %v", err)
+	}
+
+	// JSON -> Go
+	var s2 SqlString
+	if err := json.Unmarshal(jsonData, &s2); err != nil {
+		t.Fatalf("Unmarshal failed: %v", err)
+	}
+
+	// Go -> SQL (Value)
+	_, err = s2.Value()
+	if err != nil {
+		t.Fatalf("Value failed: %v", err)
+	}
+
+	// SQL -> Go (Scan plain text)
+	var s3 SqlString
+	// Simulate SQL driver returning plain text value
+	if err := s3.Scan(original); err != nil {
+		t.Fatalf("Scan failed: %v", err)
+	}
+
+	// Verify round-trip
+	if s3.String() != original {
+		t.Errorf("Round-trip failed: expected %q, got %q", original, s3.String())
+	}
+}
+
+// TestSqlByteArray_Base64_RoundTrip tests complete round-trip: Go -> JSON -> Go -> SQL -> Go
+func TestSqlByteArray_Base64_RoundTrip(t *testing.T) {
+	original := []byte{0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x20, 0xFF, 0xFE} // "Hello " + binary data
+
+	// Go -> JSON
+	b1 := NewSqlByteArray(original)
+	jsonData, err := json.Marshal(b1)
+	if err != nil {
+		t.Fatalf("Marshal failed: %v", err)
+	}
+
+	// JSON -> Go
+	var b2 SqlByteArray
+	if err := json.Unmarshal(jsonData, &b2); err != nil {
+		t.Fatalf("Unmarshal failed: %v", err)
+	}
+
+	// Go -> SQL (Value)
+	_, err = b2.Value()
+	if err != nil {
+		t.Fatalf("Value failed: %v", err)
+	}
+
+	// SQL -> Go (Scan with base64)
+	var b3 SqlByteArray
+	// Simulate SQL driver returning base64 encoded value
+	if err := b3.Scan("SGVsbG8g//4="); err != nil {
+		t.Fatalf("Scan failed: %v", err)
+	}
+
+	// Verify round-trip
+	if string(b3.Val) != string(original) {
+		t.Errorf("Round-trip failed: expected %v, got %v", original, b3.Val)
+	}
+}