8 Commits

Author SHA1 Message Date
a54594e49b feat(writer): 🎉 Add support for unique constraints in schema generation
All checks were successful
* Implement unique constraint handling in GenerateSchemaStatements
* Add writeUniqueConstraints method for generating SQL statements
* Create unit test for unique constraints in writer_test.go
2026-01-31 20:33:08 +02:00
cafe6a461f feat(scripts): 🎉 Add --ignore-errors flag for script execution
All checks were successful
- Allow continued execution of scripts even if errors occur.
- Update execution summary to include counts of successful and failed scripts.
- Enhance error handling and reporting for better visibility.
2026-01-31 20:21:22 +02:00
abdb9b4c78 feat(dbml/reader): 🎉 Implement splitIdentifier function for parsing
All checks were successful
2026-01-31 19:45:24 +02:00
e7a15c8e4f feat(writer): 🎉 Implement add column statements for schema evolution
All checks were successful
* Add functionality to generate ALTER TABLE ADD COLUMN statements for existing tables.
* Introduce tests for generating and writing add column statements.
* Enhance schema evolution capabilities when new columns are added.
2026-01-31 19:12:00 +02:00
c36b5ede2b feat(writer): 🎉 Enhance primary key handling and add tests
All checks were successful
* Implement checks for existing primary keys before adding new ones.
* Drop auto-generated primary keys if they exist.
* Add tests for primary key existence and column size specifiers.
* Improve type conversion handling for PostgreSQL compatibility.
2026-01-31 18:59:32 +02:00
51ab29f8e3 feat(writer): 🎉 Update index naming conventions for consistency
All checks were successful
* Use SQLName() for primary key constraint naming
* Enhance index name formatting with column suffix
2026-01-31 17:23:18 +02:00
f532fc110c feat(writer): 🎉 Enhance script execution order and add symlink skipping
All checks were successful
* Update script execution to sort by Priority, Sequence, and Name.
* Add functionality to skip symbolic links during directory scanning.
* Improve documentation to reflect changes in execution order and features.
* Add tests for symlink skipping and ensure correct script sorting.
2026-01-31 16:59:17 +02:00
92dff99725 feat(writer): enhance type conversion for PostgreSQL compatibility and add tests
Some checks failed
Integration Tests / Integration Tests (push) failed
2026-01-29 21:36:23 +02:00
16 changed files with 1321 additions and 121 deletions

.gitignore vendored
View File

@@ -47,3 +47,4 @@ dist/
build/
bin/
tests/integration/failed_statements_example.txt
test_output.log

View File

@@ -18,6 +18,7 @@ var (
scriptsConn string
scriptsSchemaName string
scriptsDBName string
scriptsIgnoreErrors bool
)
var scriptsCmd = &cobra.Command{
@@ -39,8 +40,8 @@ Example filenames (hyphen format):
1-002-create-posts.sql # Priority 1, Sequence 2
10-10-create-newid.pgsql # Priority 10, Sequence 10
Both formats can be mixed in the same directory.
Scripts are executed in order: Priority (ascending), then Sequence (ascending).`,
Both formats can be mixed in the same directory and subdirectories.
Scripts are executed in order: Priority (ascending), Sequence (ascending), Name (alphabetical).`,
}
var scriptsListCmd = &cobra.Command{
@@ -48,8 +49,8 @@ var scriptsListCmd = &cobra.Command{
Short: "List SQL scripts from a directory",
Long: `List SQL scripts from a directory and show their execution order.
The scripts are read from the specified directory and displayed in the order
they would be executed (Priority ascending, then Sequence ascending).
The scripts are read recursively from the specified directory and displayed in the order
they would be executed: Priority (ascending), then Sequence (ascending), then Name (alphabetical).
Example:
relspec scripts list --dir ./migrations`,
@@ -61,10 +62,10 @@ var scriptsExecuteCmd = &cobra.Command{
Short: "Execute SQL scripts against a database",
Long: `Execute SQL scripts from a directory against a PostgreSQL database.
Scripts are executed in order: Priority (ascending), then Sequence (ascending).
Execution stops immediately on the first error.
Scripts are executed in order: Priority (ascending), Sequence (ascending), Name (alphabetical).
By default, execution stops immediately on the first error. Use --ignore-errors to continue execution.
The directory is scanned recursively for files matching the patterns:
The directory is scanned recursively for all subdirectories and files matching the patterns:
{priority}_{sequence}_{name}.sql or .pgsql (underscore format)
{priority}-{sequence}-{name}.sql or .pgsql (hyphen format)
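As a side note, both patterns are simple enough to parse with a single regular expression. A minimal, self-contained sketch (the regex and helper are illustrative, not the project's actual parser):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// scriptPattern covers both formats: {priority}_{sequence}_{name}.sql|.pgsql
// and {priority}-{sequence}-{name}.sql|.pgsql.
var scriptPattern = regexp.MustCompile(`^(\d+)[_-](\d+)[_-](.+)\.(sql|pgsql)$`)

// parseScriptName extracts priority, sequence, and name from a filename.
func parseScriptName(filename string) (priority, sequence int, name string, ok bool) {
	m := scriptPattern.FindStringSubmatch(filename)
	if m == nil {
		return 0, 0, "", false
	}
	priority, _ = strconv.Atoi(m[1])
	sequence, _ = strconv.Atoi(m[2])
	return priority, sequence, m[3], true
}

func main() {
	for _, f := range []string{"1-002-create-posts.sql", "10_10_create_newid.pgsql", "notes.txt"} {
		p, s, n, ok := parseScriptName(f)
		fmt.Printf("%-28s -> priority=%d sequence=%d name=%q ok=%v\n", f, p, s, n, ok)
	}
}
```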
@@ -75,7 +76,7 @@ PostgreSQL Connection String Examples:
postgresql://user:pass@host/dbname?sslmode=require
Examples:
# Execute migration scripts
# Execute migration scripts from a directory (including subdirectories)
relspec scripts execute --dir ./migrations \
--conn "postgres://user:pass@localhost:5432/mydb"
@@ -86,7 +87,12 @@ Examples:
# Execute with SSL disabled
relspec scripts execute --dir ./sql \
--conn "postgres://user:pass@localhost/db?sslmode=disable"`,
--conn "postgres://user:pass@localhost/db?sslmode=disable"
# Continue executing even if errors occur
relspec scripts execute --dir ./migrations \
--conn "postgres://localhost/mydb" \
--ignore-errors`,
RunE: runScriptsExecute,
}
@@ -105,6 +111,7 @@ func init() {
scriptsExecuteCmd.Flags().StringVar(&scriptsConn, "conn", "", "PostgreSQL connection string (required)")
scriptsExecuteCmd.Flags().StringVar(&scriptsSchemaName, "schema", "public", "Schema name (optional, default: public)")
scriptsExecuteCmd.Flags().StringVar(&scriptsDBName, "database", "database", "Database name (optional, default: database)")
scriptsExecuteCmd.Flags().BoolVar(&scriptsIgnoreErrors, "ignore-errors", false, "Continue executing scripts even if errors occur")
err = scriptsExecuteCmd.MarkFlagRequired("dir")
if err != nil {
@@ -149,7 +156,7 @@ func runScriptsList(cmd *cobra.Command, args []string) error {
return nil
}
// Sort scripts by Priority then Sequence
// Sort scripts by Priority, Sequence, then Name
sortedScripts := make([]*struct {
name string
priority int
@@ -186,7 +193,10 @@ func runScriptsList(cmd *cobra.Command, args []string) error {
if sortedScripts[i].priority != sortedScripts[j].priority {
return sortedScripts[i].priority < sortedScripts[j].priority
}
if sortedScripts[i].sequence != sortedScripts[j].sequence {
return sortedScripts[i].sequence < sortedScripts[j].sequence
}
return sortedScripts[i].name < sortedScripts[j].name
})
fmt.Fprintf(os.Stderr, "Found %d script(s) in execution order:\n\n", len(sortedScripts))
@@ -242,22 +252,44 @@ func runScriptsExecute(cmd *cobra.Command, args []string) error {
fmt.Fprintf(os.Stderr, " ✓ Found %d script(s)\n\n", len(schema.Scripts))
// Step 2: Execute scripts
fmt.Fprintf(os.Stderr, "[2/2] Executing scripts in order (Priority → Sequence)...\n\n")
fmt.Fprintf(os.Stderr, "[2/2] Executing scripts in order (Priority → Sequence → Name)...\n\n")
writer := sqlexec.NewWriter(&writers.WriterOptions{
Metadata: map[string]any{
"connection_string": scriptsConn,
"ignore_errors": scriptsIgnoreErrors,
},
})
if err := writer.WriteSchema(schema); err != nil {
fmt.Fprintf(os.Stderr, "\n")
return fmt.Errorf("execution failed: %w", err)
return fmt.Errorf("script execution failed: %w", err)
}
// Get execution results from writer metadata
totalCount := len(schema.Scripts)
successCount := totalCount
failedCount := 0
opts := writer.Options()
if total, exists := opts.Metadata["execution_total"].(int); exists {
totalCount = total
}
if success, exists := opts.Metadata["execution_success"].(int); exists {
successCount = success
}
if failed, exists := opts.Metadata["execution_failed"].(int); exists {
failedCount = failed
}
fmt.Fprintf(os.Stderr, "\n=== Execution Complete ===\n")
fmt.Fprintf(os.Stderr, "Completed at: %s\n", getCurrentTimestamp())
fmt.Fprintf(os.Stderr, "Successfully executed %d script(s)\n\n", len(schema.Scripts))
fmt.Fprintf(os.Stderr, "Total scripts: %d\n", totalCount)
fmt.Fprintf(os.Stderr, "Successful: %d\n", successCount)
if failedCount > 0 {
fmt.Fprintf(os.Stderr, "Failed: %d\n", failedCount)
}
fmt.Fprintf(os.Stderr, "\n")
return nil
}

View File

@@ -4,31 +4,31 @@ import "strings"
var GoToStdTypes = map[string]string{
"bool": "boolean",
"int64": "integer",
"int64": "bigint",
"int": "integer",
"int8": "integer",
"int16": "integer",
"int8": "smallint",
"int16": "smallint",
"int32": "integer",
"uint": "integer",
"uint8": "integer",
"uint16": "integer",
"uint8": "smallint",
"uint16": "smallint",
"uint32": "integer",
"uint64": "integer",
"uintptr": "integer",
"znullint64": "integer",
"uint64": "bigint",
"uintptr": "bigint",
"znullint64": "bigint",
"znullint32": "integer",
"znullbyte": "integer",
"znullbyte": "smallint",
"float64": "double",
"float32": "double",
"complex64": "double",
"complex128": "double",
"customfloat64": "double",
"string": "string",
"Pointer": "integer",
"string": "text",
"Pointer": "bigint",
"[]byte": "blob",
"customdate": "string",
"customtime": "string",
"customtimestamp": "string",
"customdate": "date",
"customtime": "time",
"customtimestamp": "timestamp",
"sqlfloat64": "double",
"sqlfloat16": "double",
"sqluuid": "uuid",
@@ -36,9 +36,9 @@ var GoToStdTypes = map[string]string{
"sqljson": "json",
"sqlint64": "bigint",
"sqlint32": "integer",
"sqlint16": "integer",
"sqlint16": "smallint",
"sqlbool": "boolean",
"sqlstring": "string",
"sqlstring": "text",
"nullablejsonb": "jsonb",
"nullablejson": "json",
"nullableuuid": "uuid",
@@ -67,7 +67,7 @@ var GoToPGSQLTypes = map[string]string{
"float32": "real",
"complex64": "double precision",
"complex128": "double precision",
"customfloat64": "double precisio",
"customfloat64": "double precision",
"string": "text",
"Pointer": "bigint",
"[]byte": "bytea",
@@ -81,9 +81,9 @@ var GoToPGSQLTypes = map[string]string{
"sqljson": "json",
"sqlint64": "bigint",
"sqlint32": "integer",
"sqlint16": "integer",
"sqlint16": "smallint",
"sqlbool": "boolean",
"sqlstring": "string",
"sqlstring": "text",
"nullablejsonb": "jsonb",
"nullablejson": "json",
"nullableuuid": "uuid",

View File

@@ -128,6 +128,46 @@ func (r *Reader) readDirectoryDBML(dirPath string) (*models.Database, error) {
return db, nil
}
// splitIdentifier splits a dotted identifier while respecting quotes
// Handles cases like: "schema.with.dots"."table"."column"
func splitIdentifier(s string) []string {
var parts []string
var current strings.Builder
inQuote := false
quoteChar := byte(0)
for i := 0; i < len(s); i++ {
ch := s[i]
if !inQuote {
switch ch {
case '"', '\'':
inQuote = true
quoteChar = ch
current.WriteByte(ch)
case '.':
if current.Len() > 0 {
parts = append(parts, current.String())
current.Reset()
}
default:
current.WriteByte(ch)
}
} else {
current.WriteByte(ch)
if ch == quoteChar {
inQuote = false
}
}
}
if current.Len() > 0 {
parts = append(parts, current.String())
}
return parts
}
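A quick illustration of splitIdentifier's behavior (quotes are kept on each part; dots inside quotes do not split):

```go
// Assuming splitIdentifier is in scope:
fmt.Println(splitIdentifier(`public.users`))               // [public users]
fmt.Println(splitIdentifier(`"schema.with.dots"."table"`)) // ["schema.with.dots" "table"]
fmt.Println(splitIdentifier(`"a"."b"."c"`))                // ["a" "b" "c"]
```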
// stripQuotes removes surrounding quotes and comments from an identifier
func stripQuotes(s string) string {
s = strings.TrimSpace(s)
@@ -409,7 +449,9 @@ func (r *Reader) parseDBML(content string) (*models.Database, error) {
// Parse Table definition
if matches := tableRegex.FindStringSubmatch(line); matches != nil {
tableName := matches[1]
parts := strings.Split(tableName, ".")
// Strip comments/notes before parsing to avoid dots in notes
tableName = strings.TrimSpace(regexp.MustCompile(`\s*\[.*?\]\s*`).ReplaceAllString(tableName, ""))
parts := splitIdentifier(tableName)
if len(parts) == 2 {
currentSchema = stripQuotes(parts[0])
@@ -814,7 +856,7 @@ func (r *Reader) parseTableRef(ref string) (schema, table string, columns []stri
}
// Parse schema, table, and optionally column
parts := strings.Split(strings.TrimSpace(ref), ".")
parts := splitIdentifier(strings.TrimSpace(ref))
if len(parts) == 3 {
// Format: "schema"."table"."column"
schema = stripQuotes(parts[0])

View File

@@ -93,6 +93,7 @@ fmt.Printf("Found %d scripts\n", len(schema.Scripts))
## Features
- **Recursive Directory Scanning**: Automatically scans all subdirectories
- **Symlink Skipping**: Symbolic links are automatically skipped (prevents loops and duplicates)
- **Multiple Extensions**: Supports both `.sql` and `.pgsql` files
- **Flexible Naming**: Extract metadata from filename patterns
- **Error Handling**: Validates directory existence and file accessibility
@@ -153,8 +154,9 @@ go test ./pkg/readers/sqldir/
```
Tests include:
- Valid file parsing
- Valid file parsing (underscore and hyphen formats)
- Recursive directory scanning
- Symlink skipping
- Invalid filename handling
- Empty directory handling
- Error conditions

View File

@@ -107,11 +107,20 @@ func (r *Reader) readScripts() ([]*models.Script, error) {
return err
}
// Skip directories
// Don't process directories as files (WalkDir still descends into them recursively)
if d.IsDir() {
return nil
}
// Skip symlinks
info, err := d.Info()
if err != nil {
return err
}
if info.Mode()&os.ModeSymlink != 0 {
return nil
}
// Get filename
filename := d.Name()
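For context, the full walking pattern looks roughly like this as a standalone program (the root path is illustrative; the skip logic matches the diff above):

```go
package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

func main() {
	root := "./migrations" // illustrative root directory
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		// Directories are not processed as files, but WalkDir still descends into them.
		if d.IsDir() {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		// Skip symlinks to avoid loops and duplicate scripts.
		if info.Mode()&os.ModeSymlink != 0 {
			return nil
		}
		fmt.Println("would process:", path)
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "walk failed:", err)
	}
}
```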

View File

@@ -373,3 +373,65 @@ func TestReader_MixedFormat(t *testing.T) {
}
}
}
func TestReader_SkipSymlinks(t *testing.T) {
// Create temporary test directory
tempDir, err := os.MkdirTemp("", "sqldir-test-symlink-*")
if err != nil {
t.Fatalf("Failed to create temp directory: %v", err)
}
defer os.RemoveAll(tempDir)
// Create a real SQL file
realFile := filepath.Join(tempDir, "1_001_real_file.sql")
if err := os.WriteFile(realFile, []byte("SELECT 1;"), 0644); err != nil {
t.Fatalf("Failed to create real file: %v", err)
}
// Create another file to link to
targetFile := filepath.Join(tempDir, "2_001_target.sql")
if err := os.WriteFile(targetFile, []byte("SELECT 2;"), 0644); err != nil {
t.Fatalf("Failed to create target file: %v", err)
}
// Create a symlink to the target file (this should be skipped)
symlinkFile := filepath.Join(tempDir, "3_001_symlink.sql")
if err := os.Symlink(targetFile, symlinkFile); err != nil {
// Skip test on systems that don't support symlinks (e.g., Windows without admin)
t.Skipf("Symlink creation not supported: %v", err)
}
// Create reader
reader := NewReader(&readers.ReaderOptions{
FilePath: tempDir,
})
// Read database
db, err := reader.ReadDatabase()
if err != nil {
t.Fatalf("ReadDatabase failed: %v", err)
}
schema := db.Schemas[0]
// Should only have 2 scripts (real_file and target), symlink should be skipped
if len(schema.Scripts) != 2 {
t.Errorf("Expected 2 scripts (symlink should be skipped), got %d", len(schema.Scripts))
}
// Verify the scripts are the real files, not the symlink
scriptNames := make(map[string]bool)
for _, script := range schema.Scripts {
scriptNames[script.Name] = true
}
if !scriptNames["real_file"] {
t.Error("Expected 'real_file' script to be present")
}
if !scriptNames["target"] {
t.Error("Expected 'target' script to be present")
}
if scriptNames["symlink"] {
t.Error("Symlink script should have been skipped but was found")
}
}

View File

@@ -0,0 +1,217 @@
# PostgreSQL Naming Conventions
Standardized naming rules for all database objects in RelSpec PostgreSQL output.
## Quick Reference
| Object Type | Prefix | Format | Example |
| ----------------- | ----------- | ---------------------------------- | ------------------------ |
| Primary Key | `pk_` | `pk_<schema>_<table>` | `pk_public_users` |
| Foreign Key | `fk_` | `fk_<table>_<referenced_table>` | `fk_posts_users` |
| Unique Constraint | `uk_` | `uk_<table>_<column>` | `uk_users_email` |
| Unique Index | `uidx_` | `uidx_<table>_<column>` | `uidx_users_email` |
| Regular Index | `idx_` | `idx_<table>_<column>` | `idx_posts_user_id` |
| Check Constraint | `chk_` | `chk_<table>_<constraint_purpose>` | `chk_users_age_positive` |
| Sequence | `identity_` | `identity_<table>_<column>` | `identity_users_id` |
| Trigger | `t_` | `t_<purpose>_<table>` | `t_audit_users` |
| Trigger Function | `tf_` | `tf_<purpose>_<table>` | `tf_audit_users` |
## Naming Rules by Object Type
### Primary Keys
**Pattern:** `pk_<schema>_<table>`
- Include schema name to avoid collisions across schemas
- Use lowercase, snake_case format
- Examples:
- `pk_public_users`
- `pk_audit_audit_log`
- `pk_staging_temp_data`
### Foreign Keys
**Pattern:** `fk_<table>_<referenced_table>`
- Reference the table containing the FK followed by the referenced table
- Use lowercase, snake_case format
- Do NOT include column names in standard FK constraints
- Examples:
- `fk_posts_users` (posts.user_id → users.id)
- `fk_comments_posts` (comments.post_id → posts.id)
- `fk_order_items_orders` (order_items.order_id → orders.id)
### Unique Constraints
**Pattern:** `uk_<table>_<column>`
- Use `uk_` prefix strictly for database constraints (CONSTRAINT type)
- Include column name for clarity
- Examples:
- `uk_users_email`
- `uk_users_username`
- `uk_products_sku`
### Unique Indexes
**Pattern:** `uidx_<table>_<column>`
- Use `uidx_` prefix strictly for index type objects
- Distinguished from constraints for clarity and implementation flexibility
- Examples:
- `uidx_users_email`
- `uidx_sessions_token`
- `uidx_api_keys_key`
### Regular Indexes
**Pattern:** `idx_<table>_<column>`
- Standard indexes for query optimization
- Single column: `idx_<table>_<column>`
- Examples:
- `idx_posts_user_id`
- `idx_orders_created_at`
- `idx_users_status`
### Check Constraints
**Pattern:** `chk_<table>_<constraint_purpose>`
- Describe the constraint validation purpose
- Use lowercase, snake_case for the purpose
- Examples:
- `chk_users_age_positive` (CHECK (age > 0))
- `chk_orders_quantity_positive` (CHECK (quantity > 0))
- `chk_products_price_valid` (CHECK (price >= 0))
- `chk_users_status_enum` (CHECK (status IN ('active', 'inactive')))
### Sequences
**Pattern:** `identity_<table>_<column>`
- Used for SERIAL/IDENTITY columns
- Explicitly named for clarity and management
- Examples:
- `identity_users_id`
- `identity_posts_id`
- `identity_transactions_id`
### Triggers
**Pattern:** `t_<purpose>_<table>`
- Include purpose before table name
- Lowercase, snake_case format
- Examples:
- `t_audit_users` (audit trigger on users table)
- `t_update_timestamp_posts` (timestamp update trigger on posts)
- `t_validate_orders` (validation trigger on orders)
### Trigger Functions
**Pattern:** `tf_<purpose>_<table>`
- Pair with trigger naming convention
- Use `tf_` prefix to distinguish from triggers themselves
- Examples:
- `tf_audit_users` (function for t_audit_users)
- `tf_update_timestamp_posts` (function for t_update_timestamp_posts)
- `tf_validate_orders` (function for t_validate_orders)
## Multi-Column Objects
### Composite Primary Keys
**Pattern:** `pk_<schema>_<table>`
- Same as single-column PKs
- Example: `pk_public_order_items` (composite key on order_id + item_id)
### Composite Unique Constraints
**Pattern:** `uk_<table>_<column1>_<column2>_[...]`
- Append all column names in order
- Examples:
- `uk_users_email_domain` (UNIQUE(email, domain))
- `uk_inventory_warehouse_sku` (UNIQUE(warehouse_id, sku))
### Composite Unique Indexes
**Pattern:** `uidx_<table>_<column1>_<column2>_[...]`
- Append all column names in order
- Examples:
- `uidx_users_first_name_last_name` (UNIQUE INDEX on first_name, last_name)
- `uidx_sessions_user_id_device_id` (UNIQUE INDEX on user_id, device_id)
### Composite Regular Indexes
**Pattern:** `idx_<table>_<column1>_<column2>_[...]`
- Append all column names in order
- List columns in typical query filter order
- Examples:
- `idx_orders_user_id_created_at` (filter by user, then sort by created_at)
- `idx_logs_level_timestamp` (filter by level, then by timestamp)
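Applied programmatically, the composite patterns above reduce to simple string assembly. A hedged Go sketch (the helper is hypothetical; only the naming rules come from this document):

```go
package main

import (
	"fmt"
	"strings"
)

// indexName assembles idx_/uidx_ names per the composite patterns above.
func indexName(table string, columns []string, unique bool) string {
	prefix := "idx"
	if unique {
		prefix = "uidx"
	}
	return strings.ToLower(fmt.Sprintf("%s_%s_%s", prefix, table, strings.Join(columns, "_")))
}

func main() {
	fmt.Println(indexName("orders", []string{"user_id", "created_at"}, false)) // idx_orders_user_id_created_at
	fmt.Println(indexName("sessions", []string{"user_id", "device_id"}, true)) // uidx_sessions_user_id_device_id
}
```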
## Special Cases & Conventions
### Audit Trail Tables
- Audit table naming: `<original_table>_audit` or `audit_<original_table>`
- Audit indexes follow standard pattern: `idx_<audit_table>_<column>`
- Examples:
- Users table audit: `users_audit` with `idx_users_audit_tablename`, `idx_users_audit_changedate`
- Posts table audit: `posts_audit` with `idx_posts_audit_tablename`, `idx_posts_audit_changedate`
### Temporal/Versioning Tables
- Use suffix `_history` or `_versions` if needed
- Apply standard naming rules with the full table name
- Examples:
- `idx_users_history_user_id`
- `uk_posts_versions_version_number`
### Schema-Specific Objects
- Always qualify with schema when needed: `pk_<schema>_<table>`
- Multiple schemas allowed: `pk_public_users`, `pk_staging_users`
### Reserved Words & Special Names
- Avoid PostgreSQL reserved keywords in object names
- If column/table names conflict, use quoted identifiers in DDL
- Naming convention rules still apply to the logical name
### Generated/Anonymous Indexes
- If an index lacks explicit naming, default to: `idx_<schema>_<table>`
- Should be replaced with explicit names following standards
- Examples (to be renamed):
- `idx_public_users` → should be `idx_users_<column>`
## Implementation Notes
### Code Generation
- Names are always lowercase in generated SQL
- Underscore separators are required
### Migration Safety
- Do NOT rename objects after creation without explicit migration
- Names should be consistent across all schema versions
- Test generated DDL against PostgreSQL before deployment
### Testing
- Ensure consistency across all table and constraint generation
- Test with reserved words to verify escaping
## Related Documentation
- PostgreSQL Identifier Rules: https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-IDENTIFIERS
- Constraint Documentation: https://www.postgresql.org/docs/current/ddl-constraints.html
- Index Documentation: https://www.postgresql.org/docs/current/indexes.html

View File

@@ -8,6 +8,7 @@ import (
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
)
@@ -335,7 +336,7 @@ func (w *MigrationWriter) generateAlterTableScripts(schema *models.Schema, model
SchemaName: schema.Name,
TableName: modelTable.Name,
ColumnName: modelCol.Name,
ColumnType: modelCol.Type,
ColumnType: pgsql.ConvertSQLType(modelCol.Type),
Default: defaultVal,
NotNull: modelCol.NotNull,
})
@@ -359,7 +360,7 @@ func (w *MigrationWriter) generateAlterTableScripts(schema *models.Schema, model
SchemaName: schema.Name,
TableName: modelTable.Name,
ColumnName: modelCol.Name,
NewType: modelCol.Type,
NewType: pgsql.ConvertSQLType(modelCol.Type),
})
if err != nil {
return nil, err
@@ -476,7 +477,7 @@ func (w *MigrationWriter) generateIndexScripts(model *models.Schema, current *mo
}
if len(pkColumns) > 0 {
sort.Strings(pkColumns)
constraintName := fmt.Sprintf("pk_%s_%s", strings.ToLower(model.Name), strings.ToLower(modelTable.Name))
constraintName := fmt.Sprintf("pk_%s_%s", model.SQLName(), modelTable.SQLName())
shouldCreate := true
if currentTable != nil {
@@ -752,7 +753,7 @@ func (w *MigrationWriter) generateAuditScripts(schema *models.Schema, auditConfi
}
// Generate audit function
funcName := fmt.Sprintf("ft_audit_%s", table.Name)
funcName := fmt.Sprintf("tf_audit_%s", table.Name)
funcData := BuildAuditFunctionData(schema.Name, table, pk, config, auditSchema, auditConfig.UserFunction)
funcSQL, err := w.executor.ExecuteAuditFunction(funcData)

View File

@@ -121,7 +121,7 @@ func TestWriteMigration_WithAudit(t *testing.T) {
}
// Verify audit function
if !strings.Contains(output, "CREATE OR REPLACE FUNCTION public.ft_audit_users()") {
if !strings.Contains(output, "CREATE OR REPLACE FUNCTION public.tf_audit_users()") {
t.Error("Migration missing audit function")
}
@@ -177,7 +177,7 @@ func TestTemplateExecutor_AuditFunction(t *testing.T) {
data := AuditFunctionData{
SchemaName: "public",
FunctionName: "ft_audit_users",
FunctionName: "tf_audit_users",
TableName: "users",
TablePrefix: "NULL",
PrimaryKey: "id",
@@ -202,7 +202,7 @@ func TestTemplateExecutor_AuditFunction(t *testing.T) {
t.Logf("Generated SQL:\n%s", sql)
if !strings.Contains(sql, "CREATE OR REPLACE FUNCTION public.ft_audit_users()") {
if !strings.Contains(sql, "CREATE OR REPLACE FUNCTION public.tf_audit_users()") {
t.Error("SQL missing function definition")
}
if !strings.Contains(sql, "IF TG_OP = 'INSERT'") {

View File

@@ -355,7 +355,7 @@ func BuildAuditFunctionData(
auditSchema string,
userFunction string,
) AuditFunctionData {
funcName := fmt.Sprintf("ft_audit_%s", table.Name)
funcName := fmt.Sprintf("tf_audit_%s", table.Name)
// Build list of audited columns
auditedColumns := make([]*models.Column, 0)

View File

@@ -13,6 +13,7 @@ import (
"github.com/jackc/pgx/v5"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
)
@@ -167,6 +168,13 @@ func (w *Writer) GenerateSchemaStatements(schema *models.Schema) ([]string, erro
statements = append(statements, stmts...)
}
// Phase 3.5: Add missing columns (for existing tables)
addColStmts, err := w.GenerateAddColumnStatements(schema)
if err != nil {
return nil, fmt.Errorf("failed to generate add column statements: %w", err)
}
statements = append(statements, addColStmts...)
// Phase 4: Primary keys
for _, table := range schema.Tables {
// First check for explicit PrimaryKeyConstraint
@@ -178,28 +186,68 @@ func (w *Writer) GenerateSchemaStatements(schema *models.Schema) ([]string, erro
}
}
var pkColumns []string
var pkName string
if pkConstraint != nil {
stmt := fmt.Sprintf("ALTER TABLE %s.%s ADD CONSTRAINT %s PRIMARY KEY (%s)",
schema.SQLName(), table.SQLName(), pkConstraint.Name, strings.Join(pkConstraint.Columns, ", "))
statements = append(statements, stmt)
pkColumns = pkConstraint.Columns
pkName = pkConstraint.Name
} else {
// No explicit constraint, check for columns with IsPrimaryKey = true
pkColumns := []string{}
pkCols := []string{}
for _, col := range table.Columns {
if col.IsPrimaryKey {
pkColumns = append(pkColumns, col.SQLName())
pkCols = append(pkCols, col.SQLName())
}
}
if len(pkColumns) > 0 {
if len(pkCols) > 0 {
// Sort for consistent output
sort.Strings(pkColumns)
pkName := fmt.Sprintf("pk_%s_%s", schema.SQLName(), table.SQLName())
stmt := fmt.Sprintf("ALTER TABLE %s.%s ADD CONSTRAINT %s PRIMARY KEY (%s)",
sort.Strings(pkCols)
pkColumns = pkCols
pkName = fmt.Sprintf("pk_%s_%s", schema.SQLName(), table.SQLName())
}
}
if len(pkColumns) > 0 {
// Auto-generated primary key names to check for and drop
autoGenPKNames := []string{
fmt.Sprintf("%s_pkey", table.Name),
fmt.Sprintf("%s_%s_pkey", schema.Name, table.Name),
}
// Wrap in DO block to drop auto-generated PK and add our named PK
stmt := fmt.Sprintf("DO $$\nDECLARE\n"+
" auto_pk_name text;\n"+
"BEGIN\n"+
" -- Drop auto-generated primary key if it exists\n"+
" SELECT constraint_name INTO auto_pk_name\n"+
" FROM information_schema.table_constraints\n"+
" WHERE table_schema = '%s'\n"+
" AND table_name = '%s'\n"+
" AND constraint_type = 'PRIMARY KEY'\n"+
" AND constraint_name IN (%s);\n"+
"\n"+
" IF auto_pk_name IS NOT NULL THEN\n"+
" EXECUTE 'ALTER TABLE %s.%s DROP CONSTRAINT ' || quote_ident(auto_pk_name);\n"+
" END IF;\n"+
"\n"+
" -- Add named primary key if it doesn't exist\n"+
" IF NOT EXISTS (\n"+
" SELECT 1 FROM information_schema.table_constraints\n"+
" WHERE table_schema = '%s'\n"+
" AND table_name = '%s'\n"+
" AND constraint_name = '%s'\n"+
" ) THEN\n"+
" ALTER TABLE %s.%s ADD CONSTRAINT %s PRIMARY KEY (%s);\n"+
" END IF;\n"+
"END;\n$$",
schema.Name, table.Name, formatStringList(autoGenPKNames),
schema.SQLName(), table.SQLName(),
schema.Name, table.Name, pkName,
schema.SQLName(), table.SQLName(), pkName, strings.Join(pkColumns, ", "))
statements = append(statements, stmt)
}
}
}
// Phase 5: Indexes
for _, table := range schema.Tables {
@@ -247,6 +295,31 @@ func (w *Writer) GenerateSchemaStatements(schema *models.Schema) ([]string, erro
}
}
// Phase 5.5: Unique constraints
for _, table := range schema.Tables {
for _, constraint := range table.Constraints {
if constraint.Type != models.UniqueConstraint {
continue
}
// Wrap in DO block to check for existing constraint
stmt := fmt.Sprintf("DO $$\nBEGIN\n"+
" IF NOT EXISTS (\n"+
" SELECT 1 FROM information_schema.table_constraints\n"+
" WHERE table_schema = '%s'\n"+
" AND table_name = '%s'\n"+
" AND constraint_name = '%s'\n"+
" ) THEN\n"+
" ALTER TABLE %s.%s ADD CONSTRAINT %s UNIQUE (%s);\n"+
" END IF;\n"+
"END;\n$$",
schema.Name, table.Name, constraint.Name,
schema.SQLName(), table.SQLName(), constraint.Name,
strings.Join(constraint.Columns, ", "))
statements = append(statements, stmt)
}
}
// Phase 6: Foreign keys
for _, table := range schema.Tables {
for _, constraint := range table.Constraints {
@@ -269,7 +342,18 @@ func (w *Writer) GenerateSchemaStatements(schema *models.Schema) ([]string, erro
onUpdate = "NO ACTION"
}
stmt := fmt.Sprintf("ALTER TABLE %s.%s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s.%s(%s) ON DELETE %s ON UPDATE %s",
// Wrap in DO block to check for existing constraint
stmt := fmt.Sprintf("DO $$\nBEGIN\n"+
" IF NOT EXISTS (\n"+
" SELECT 1 FROM information_schema.table_constraints\n"+
" WHERE table_schema = '%s'\n"+
" AND table_name = '%s'\n"+
" AND constraint_name = '%s'\n"+
" ) THEN\n"+
" ALTER TABLE %s.%s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s.%s(%s) ON DELETE %s ON UPDATE %s;\n"+
" END IF;\n"+
"END;\n$$",
schema.Name, table.Name, constraint.Name,
schema.SQLName(), table.SQLName(), constraint.Name,
strings.Join(constraint.Columns, ", "),
strings.ToLower(refSchema), strings.ToLower(constraint.ReferencedTable),
@@ -299,6 +383,68 @@ func (w *Writer) GenerateSchemaStatements(schema *models.Schema) ([]string, erro
return statements, nil
}
// GenerateAddColumnStatements generates ALTER TABLE ADD COLUMN statements for existing tables
// This is useful for schema evolution when new columns are added to existing tables
func (w *Writer) GenerateAddColumnStatements(schema *models.Schema) ([]string, error) {
statements := []string{}
statements = append(statements, fmt.Sprintf("-- Add missing columns for schema: %s", schema.Name))
for _, table := range schema.Tables {
// Sort columns by sequence or name for consistent output
columns := make([]*models.Column, 0, len(table.Columns))
for _, col := range table.Columns {
columns = append(columns, col)
}
sort.Slice(columns, func(i, j int) bool {
if columns[i].Sequence != columns[j].Sequence {
return columns[i].Sequence < columns[j].Sequence
}
return columns[i].Name < columns[j].Name
})
for _, col := range columns {
colDef := w.generateColumnDefinition(col)
// Generate DO block that checks if column exists before adding
stmt := fmt.Sprintf("DO $$\nBEGIN\n"+
" IF NOT EXISTS (\n"+
" SELECT 1 FROM information_schema.columns\n"+
" WHERE table_schema = '%s'\n"+
" AND table_name = '%s'\n"+
" AND column_name = '%s'\n"+
" ) THEN\n"+
" ALTER TABLE %s.%s ADD COLUMN %s;\n"+
" END IF;\n"+
"END;\n$$",
schema.Name, table.Name, col.Name,
schema.SQLName(), table.SQLName(), colDef)
statements = append(statements, stmt)
}
}
return statements, nil
}
// GenerateAddColumnsForDatabase generates ALTER TABLE ADD COLUMN statements for the entire database
func (w *Writer) GenerateAddColumnsForDatabase(db *models.Database) ([]string, error) {
statements := []string{}
statements = append(statements, "-- Add missing columns to existing tables")
statements = append(statements, fmt.Sprintf("-- Database: %s", db.Name))
statements = append(statements, "-- Generated by RelSpec")
for _, schema := range db.Schemas {
schemaStatements, err := w.GenerateAddColumnStatements(schema)
if err != nil {
return nil, fmt.Errorf("failed to generate add column statements for schema %s: %w", schema.Name, err)
}
statements = append(statements, schemaStatements...)
}
return statements, nil
}
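A usage sketch for the database-level helper, mirroring the model setup from the tests further down (the writer import path is an assumption):

```go
package main

import (
	"fmt"
	"log"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
	"git.warky.dev/wdevs/relspecgo/pkg/writers"
	pgwriter "git.warky.dev/wdevs/relspecgo/pkg/writers/pgsql" // assumed import path
)

func main() {
	// Build a minimal model, as the tests below do.
	db := models.InitDatabase("appdb")
	schema := models.InitSchema("public")
	table := models.InitTable("users", "public")
	col := models.InitColumn("nickname", "users", "public")
	col.Type = "text"
	table.Columns["nickname"] = col
	schema.Tables = append(schema.Tables, table)
	db.Schemas = append(db.Schemas, schema)

	w := pgwriter.NewWriter(&writers.WriterOptions{})
	stmts, err := w.GenerateAddColumnsForDatabase(db)
	if err != nil {
		log.Fatalf("generate add columns: %v", err)
	}
	for _, s := range stmts {
		fmt.Printf("%s;\n\n", s)
	}
}
```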
// generateCreateTableStatement generates CREATE TABLE statement
func (w *Writer) generateCreateTableStatement(schema *models.Schema, table *models.Table) ([]string, error) {
statements := []string{}
@@ -321,7 +467,7 @@ func (w *Writer) generateCreateTableStatement(schema *models.Schema, table *mode
columnDefs = append(columnDefs, " "+def)
}
stmt := fmt.Sprintf("CREATE TABLE %s.%s (\n%s\n)",
stmt := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s.%s (\n%s\n)",
schema.SQLName(), table.SQLName(), strings.Join(columnDefs, ",\n"))
statements = append(statements, stmt)
@@ -332,16 +478,28 @@ func (w *Writer) generateCreateTableStatement(schema *models.Schema, table *mode
func (w *Writer) generateColumnDefinition(col *models.Column) string {
parts := []string{col.SQLName()}
// Type with length/precision
typeStr := col.Type
// Type with length/precision - convert to valid PostgreSQL type
baseType := pgsql.ConvertSQLType(col.Type)
typeStr := baseType
// Only add size specifiers for types that support them
if col.Length > 0 && col.Precision == 0 {
typeStr = fmt.Sprintf("%s(%d)", col.Type, col.Length)
} else if col.Precision > 0 {
if col.Scale > 0 {
typeStr = fmt.Sprintf("%s(%d,%d)", col.Type, col.Precision, col.Scale)
} else {
typeStr = fmt.Sprintf("%s(%d)", col.Type, col.Precision)
if supportsLength(baseType) {
typeStr = fmt.Sprintf("%s(%d)", baseType, col.Length)
} else if isTextTypeWithoutLength(baseType) {
// Convert text with length to varchar
typeStr = fmt.Sprintf("varchar(%d)", col.Length)
}
// For types that don't support length (integer, bigint, etc.), ignore the length
} else if col.Precision > 0 {
if supportsPrecision(baseType) {
if col.Scale > 0 {
typeStr = fmt.Sprintf("%s(%d,%d)", baseType, col.Precision, col.Scale)
} else {
typeStr = fmt.Sprintf("%s(%d)", baseType, col.Precision)
}
}
// For types that don't support precision, ignore it
}
parts = append(parts, typeStr)
@@ -394,6 +552,11 @@ func (w *Writer) WriteSchema(schema *models.Schema) error {
return err
}
// Phase 3.5: Add missing columns (priority 120)
if err := w.writeAddColumns(schema); err != nil {
return err
}
// Phase 4: Create primary keys (priority 160)
if err := w.writePrimaryKeys(schema); err != nil {
return err
@@ -404,6 +567,11 @@ func (w *Writer) WriteSchema(schema *models.Schema) error {
return err
}
// Phase 5.5: Create unique constraints (priority 185)
if err := w.writeUniqueConstraints(schema); err != nil {
return err
}
// Phase 6: Create foreign key constraints (priority 195)
if err := w.writeForeignKeys(schema); err != nil {
return err
@@ -435,6 +603,44 @@ func (w *Writer) WriteTable(table *models.Table) error {
return w.WriteSchema(schema)
}
// WriteAddColumnStatements writes ALTER TABLE ADD COLUMN statements for a database
// This is used for schema evolution/migration when new columns are added
func (w *Writer) WriteAddColumnStatements(db *models.Database) error {
var writer io.Writer
var file *os.File
var err error
// Use existing writer if already set (for testing)
if w.writer != nil {
writer = w.writer
} else if w.options.OutputPath != "" {
// Determine output destination
file, err = os.Create(w.options.OutputPath)
if err != nil {
return fmt.Errorf("failed to create output file: %w", err)
}
defer file.Close()
writer = file
} else {
writer = os.Stdout
}
w.writer = writer
// Generate statements
statements, err := w.GenerateAddColumnsForDatabase(db)
if err != nil {
return err
}
// Write each statement
for _, stmt := range statements {
fmt.Fprintf(w.writer, "%s;\n\n", stmt)
}
return nil
}
// writeCreateSchema generates CREATE SCHEMA statement
func (w *Writer) writeCreateSchema(schema *models.Schema) error {
if schema.Name == "public" {
@@ -488,15 +694,8 @@ func (w *Writer) writeCreateTables(schema *models.Schema) error {
columnDefs := make([]string, 0, len(columns))
for _, col := range columns {
colDef := fmt.Sprintf(" %s %s", col.SQLName(), col.Type)
// Add default value if present
if col.Default != nil && col.Default != "" {
// Strip backticks - DBML uses them for SQL expressions but PostgreSQL doesn't
defaultVal := fmt.Sprintf("%v", col.Default)
colDef += fmt.Sprintf(" DEFAULT %s", stripBackticks(defaultVal))
}
// Use generateColumnDefinition to properly handle type, length, precision, and defaults
colDef := " " + w.generateColumnDefinition(col)
columnDefs = append(columnDefs, colDef)
}
@@ -507,6 +706,35 @@ func (w *Writer) writeCreateTables(schema *models.Schema) error {
return nil
}
// writeAddColumns generates ALTER TABLE ADD COLUMN statements for missing columns
func (w *Writer) writeAddColumns(schema *models.Schema) error {
fmt.Fprintf(w.writer, "-- Add missing columns for schema: %s\n", schema.Name)
for _, table := range schema.Tables {
// Sort columns by sequence or name for consistent output
columns := getSortedColumns(table.Columns)
for _, col := range columns {
colDef := w.generateColumnDefinition(col)
// Generate DO block that checks if column exists before adding
fmt.Fprintf(w.writer, "DO $$\nBEGIN\n")
fmt.Fprintf(w.writer, " IF NOT EXISTS (\n")
fmt.Fprintf(w.writer, " SELECT 1 FROM information_schema.columns\n")
fmt.Fprintf(w.writer, " WHERE table_schema = '%s'\n", schema.Name)
fmt.Fprintf(w.writer, " AND table_name = '%s'\n", table.Name)
fmt.Fprintf(w.writer, " AND column_name = '%s'\n", col.Name)
fmt.Fprintf(w.writer, " ) THEN\n")
fmt.Fprintf(w.writer, " ALTER TABLE %s.%s ADD COLUMN %s;\n",
schema.SQLName(), table.SQLName(), colDef)
fmt.Fprintf(w.writer, " END IF;\n")
fmt.Fprintf(w.writer, "END;\n$$;\n\n")
}
}
return nil
}
// writePrimaryKeys generates ALTER TABLE statements for primary keys
func (w *Writer) writePrimaryKeys(schema *models.Schema) error {
fmt.Fprintf(w.writer, "-- Primary keys for schema: %s\n", schema.Name)
@@ -548,7 +776,32 @@ func (w *Writer) writePrimaryKeys(schema *models.Schema) error {
continue
}
fmt.Fprintf(w.writer, "DO $$\nBEGIN\n")
// Auto-generated primary key names to check for and drop
autoGenPKNames := []string{
fmt.Sprintf("%s_pkey", table.Name),
fmt.Sprintf("%s_%s_pkey", schema.Name, table.Name),
}
fmt.Fprintf(w.writer, "DO $$\nDECLARE\n")
fmt.Fprintf(w.writer, " auto_pk_name text;\nBEGIN\n")
// Check for and drop auto-generated primary keys
fmt.Fprintf(w.writer, " -- Drop auto-generated primary key if it exists\n")
fmt.Fprintf(w.writer, " SELECT constraint_name INTO auto_pk_name\n")
fmt.Fprintf(w.writer, " FROM information_schema.table_constraints\n")
fmt.Fprintf(w.writer, " WHERE table_schema = '%s'\n", schema.Name)
fmt.Fprintf(w.writer, " AND table_name = '%s'\n", table.Name)
fmt.Fprintf(w.writer, " AND constraint_type = 'PRIMARY KEY'\n")
fmt.Fprintf(w.writer, " AND constraint_name IN (%s);\n", formatStringList(autoGenPKNames))
fmt.Fprintf(w.writer, "\n")
fmt.Fprintf(w.writer, " IF auto_pk_name IS NOT NULL THEN\n")
fmt.Fprintf(w.writer, " EXECUTE 'ALTER TABLE %s.%s DROP CONSTRAINT ' || quote_ident(auto_pk_name);\n",
schema.SQLName(), table.SQLName())
fmt.Fprintf(w.writer, " END IF;\n")
fmt.Fprintf(w.writer, "\n")
// Add our named primary key if it doesn't exist
fmt.Fprintf(w.writer, " -- Add named primary key if it doesn't exist\n")
fmt.Fprintf(w.writer, " IF NOT EXISTS (\n")
fmt.Fprintf(w.writer, " SELECT 1 FROM information_schema.table_constraints\n")
fmt.Fprintf(w.writer, " WHERE table_schema = '%s'\n", schema.Name)
@@ -590,9 +843,10 @@ func (w *Writer) writeIndexes(schema *models.Schema) error {
if indexName == "" {
indexType := "idx"
if index.Unique {
indexType = "uk"
indexType = "uidx"
}
indexName = fmt.Sprintf("%s_%s_%s", indexType, schema.SQLName(), table.SQLName())
columnSuffix := strings.Join(index.Columns, "_")
indexName = fmt.Sprintf("%s_%s_%s", indexType, table.SQLName(), strings.ToLower(columnSuffix))
}
// Build column list with operator class support for GIN indexes
@@ -641,6 +895,55 @@ func (w *Writer) writeIndexes(schema *models.Schema) error {
return nil
}
// writeUniqueConstraints generates ALTER TABLE statements for unique constraints
func (w *Writer) writeUniqueConstraints(schema *models.Schema) error {
fmt.Fprintf(w.writer, "-- Unique constraints for schema: %s\n", schema.Name)
for _, table := range schema.Tables {
// Sort constraints by name for consistent output
constraintNames := make([]string, 0, len(table.Constraints))
for name, constraint := range table.Constraints {
if constraint.Type == models.UniqueConstraint {
constraintNames = append(constraintNames, name)
}
}
sort.Strings(constraintNames)
for _, name := range constraintNames {
constraint := table.Constraints[name]
// Build column list
columnExprs := make([]string, 0, len(constraint.Columns))
for _, colName := range constraint.Columns {
if col, ok := table.Columns[colName]; ok {
columnExprs = append(columnExprs, col.SQLName())
}
}
if len(columnExprs) == 0 {
continue
}
// Wrap in DO block to check for existing constraint
fmt.Fprintf(w.writer, "DO $$\n")
fmt.Fprintf(w.writer, "BEGIN\n")
fmt.Fprintf(w.writer, " IF NOT EXISTS (\n")
fmt.Fprintf(w.writer, " SELECT 1 FROM information_schema.table_constraints\n")
fmt.Fprintf(w.writer, " WHERE table_schema = '%s'\n", schema.Name)
fmt.Fprintf(w.writer, " AND table_name = '%s'\n", table.Name)
fmt.Fprintf(w.writer, " AND constraint_name = '%s'\n", constraint.Name)
fmt.Fprintf(w.writer, " ) THEN\n")
fmt.Fprintf(w.writer, " ALTER TABLE %s.%s ADD CONSTRAINT %s UNIQUE (%s);\n",
schema.SQLName(), table.SQLName(), constraint.Name, strings.Join(columnExprs, ", "))
fmt.Fprintf(w.writer, " END IF;\n")
fmt.Fprintf(w.writer, "END;\n")
fmt.Fprintf(w.writer, "$$;\n\n")
}
}
return nil
}
// writeForeignKeys generates ALTER TABLE statements for foreign keys
func (w *Writer) writeForeignKeys(schema *models.Schema) error {
fmt.Fprintf(w.writer, "-- Foreign keys for schema: %s\n", schema.Name)
@@ -708,13 +1011,6 @@ func (w *Writer) writeForeignKeys(schema *models.Schema) error {
onUpdate = strings.ToUpper(fkConstraint.OnUpdate)
}
fmt.Fprintf(w.writer, "ALTER TABLE %s.%s\n", schema.SQLName(), table.SQLName())
fmt.Fprintf(w.writer, " DROP CONSTRAINT IF EXISTS %s;\n", fkName)
fmt.Fprintf(w.writer, "\n")
fmt.Fprintf(w.writer, "ALTER TABLE %s.%s\n", schema.SQLName(), table.SQLName())
fmt.Fprintf(w.writer, " ADD CONSTRAINT %s\n", fkName)
fmt.Fprintf(w.writer, " FOREIGN KEY (%s)\n", strings.Join(sourceColumns, ", "))
// Use constraint's referenced schema/table or relationship's ToSchema/ToTable
refSchema := fkConstraint.ReferencedSchema
if refSchema == "" {
@@ -725,11 +1021,24 @@ func (w *Writer) writeForeignKeys(schema *models.Schema) error {
refTable = rel.ToTable
}
// Use DO block to check if constraint exists before adding
fmt.Fprintf(w.writer, "DO $$\nBEGIN\n")
fmt.Fprintf(w.writer, " IF NOT EXISTS (\n")
fmt.Fprintf(w.writer, " SELECT 1 FROM information_schema.table_constraints\n")
fmt.Fprintf(w.writer, " WHERE table_schema = '%s'\n", schema.Name)
fmt.Fprintf(w.writer, " AND table_name = '%s'\n", table.Name)
fmt.Fprintf(w.writer, " AND constraint_name = '%s'\n", fkName)
fmt.Fprintf(w.writer, " ) THEN\n")
fmt.Fprintf(w.writer, " ALTER TABLE %s.%s\n", schema.SQLName(), table.SQLName())
fmt.Fprintf(w.writer, " ADD CONSTRAINT %s\n", fkName)
fmt.Fprintf(w.writer, " FOREIGN KEY (%s)\n", strings.Join(sourceColumns, ", "))
fmt.Fprintf(w.writer, " REFERENCES %s.%s (%s)\n",
refSchema, refTable, strings.Join(targetColumns, ", "))
fmt.Fprintf(w.writer, " ON DELETE %s\n", onDelete)
fmt.Fprintf(w.writer, " ON UPDATE %s\n", onUpdate)
fmt.Fprintf(w.writer, " DEFERRABLE;\n\n")
fmt.Fprintf(w.writer, " DEFERRABLE;\n")
fmt.Fprintf(w.writer, " END IF;\n")
fmt.Fprintf(w.writer, "END;\n$$;\n\n")
}
}
@@ -841,6 +1150,44 @@ func isTextType(colType string) bool {
return false
}
// supportsLength checks if a PostgreSQL type supports length specification
func supportsLength(colType string) bool {
lengthTypes := []string{"varchar", "character varying", "char", "character", "bit", "bit varying", "varbit"}
lowerType := strings.ToLower(colType)
for _, t := range lengthTypes {
if lowerType == t || strings.HasPrefix(lowerType, t+"(") {
return true
}
}
return false
}
// supportsPrecision checks if a PostgreSQL type supports precision/scale specification
func supportsPrecision(colType string) bool {
precisionTypes := []string{"numeric", "decimal", "time", "timestamp", "timestamptz", "timestamp with time zone", "timestamp without time zone", "time with time zone", "time without time zone", "interval"}
lowerType := strings.ToLower(colType)
for _, t := range precisionTypes {
if lowerType == t || strings.HasPrefix(lowerType, t+"(") {
return true
}
}
return false
}
// isTextTypeWithoutLength checks if type is text (which should convert to varchar when length is specified)
func isTextTypeWithoutLength(colType string) bool {
return strings.EqualFold(colType, "text")
}
// formatStringList formats a list of strings as a SQL-safe comma-separated quoted list
func formatStringList(items []string) string {
quoted := make([]string, len(items))
for i, item := range items {
quoted[i] = fmt.Sprintf("'%s'", escapeQuote(item))
}
return strings.Join(quoted, ", ")
}
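For instance, assuming escapeQuote doubles embedded single quotes:

```go
// formatStringList([]string{"users_pkey", "public_users_pkey"})
// -> 'users_pkey', 'public_users_pkey'
```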
// extractOperatorClass extracts operator class from index comment/note
// Looks for common operator classes like gin_trgm_ops, gist_trgm_ops, etc.
func extractOperatorClass(comment string) string {

View File

@@ -45,11 +45,11 @@ func TestWriteDatabase(t *testing.T) {
// Add unique index
uniqueEmailIndex := &models.Index{
Name: "uk_users_email",
Name: "uidx_users_email",
Unique: true,
Columns: []string{"email"},
}
table.Indexes["uk_users_email"] = uniqueEmailIndex
table.Indexes["uidx_users_email"] = uniqueEmailIndex
schema.Tables = append(schema.Tables, table)
db.Schemas = append(db.Schemas, schema)
@@ -164,6 +164,76 @@ func TestWriteForeignKeys(t *testing.T) {
}
}
func TestWriteUniqueConstraints(t *testing.T) {
// Create a test database with unique constraints
db := models.InitDatabase("testdb")
schema := models.InitSchema("public")
// Create table with unique constraints
table := models.InitTable("users", "public")
// Add columns
emailCol := models.InitColumn("email", "users", "public")
emailCol.Type = "varchar(255)"
emailCol.NotNull = true
table.Columns["email"] = emailCol
guidCol := models.InitColumn("guid", "users", "public")
guidCol.Type = "uuid"
guidCol.NotNull = true
table.Columns["guid"] = guidCol
// Add unique constraints
emailConstraint := &models.Constraint{
Name: "uq_email",
Type: models.UniqueConstraint,
Schema: "public",
Table: "users",
Columns: []string{"email"},
}
table.Constraints["uq_email"] = emailConstraint
guidConstraint := &models.Constraint{
Name: "uq_guid",
Type: models.UniqueConstraint,
Schema: "public",
Table: "users",
Columns: []string{"guid"},
}
table.Constraints["uq_guid"] = guidConstraint
schema.Tables = append(schema.Tables, table)
db.Schemas = append(db.Schemas, schema)
// Create writer with output to buffer
var buf bytes.Buffer
options := &writers.WriterOptions{}
writer := NewWriter(options)
writer.writer = &buf
// Write the database
err := writer.WriteDatabase(db)
if err != nil {
t.Fatalf("WriteDatabase failed: %v", err)
}
output := buf.String()
// Print output for debugging
t.Logf("Generated SQL:\n%s", output)
// Verify unique constraints are present
if !strings.Contains(output, "-- Unique constraints for schema: public") {
t.Errorf("Output missing unique constraints header")
}
if !strings.Contains(output, "ADD CONSTRAINT uq_email UNIQUE (email)") {
t.Errorf("Output missing uq_email unique constraint\nFull output:\n%s", output)
}
if !strings.Contains(output, "ADD CONSTRAINT uq_guid UNIQUE (guid)") {
t.Errorf("Output missing uq_guid unique constraint\nFull output:\n%s", output)
}
}
func TestWriteTable(t *testing.T) {
// Create a single table
table := models.InitTable("products", "public")
@@ -241,3 +311,327 @@ func TestIsIntegerType(t *testing.T) {
}
}
}
func TestTypeConversion(t *testing.T) {
// Test that invalid Go types are converted to valid PostgreSQL types
db := models.InitDatabase("testdb")
schema := models.InitSchema("public")
// Create a test table with Go types instead of SQL types
table := models.InitTable("test_types", "public")
// Add columns with Go types (invalid for PostgreSQL)
stringCol := models.InitColumn("name", "test_types", "public")
stringCol.Type = "string" // Should be converted to "text"
table.Columns["name"] = stringCol
int64Col := models.InitColumn("big_id", "test_types", "public")
int64Col.Type = "int64" // Should be converted to "bigint"
table.Columns["big_id"] = int64Col
int16Col := models.InitColumn("small_id", "test_types", "public")
int16Col.Type = "int16" // Should be converted to "smallint"
table.Columns["small_id"] = int16Col
schema.Tables = append(schema.Tables, table)
db.Schemas = append(db.Schemas, schema)
// Create writer with output to buffer
var buf bytes.Buffer
options := &writers.WriterOptions{}
writer := NewWriter(options)
writer.writer = &buf
// Write the database
err := writer.WriteDatabase(db)
if err != nil {
t.Fatalf("WriteDatabase failed: %v", err)
}
output := buf.String()
// Print output for debugging
t.Logf("Generated SQL:\n%s", output)
// Verify that Go types were converted to PostgreSQL types
if strings.Contains(output, "string") {
t.Errorf("Output contains 'string' type - should be converted to 'text'\nFull output:\n%s", output)
}
if strings.Contains(output, "int64") {
t.Errorf("Output contains 'int64' type - should be converted to 'bigint'\nFull output:\n%s", output)
}
if strings.Contains(output, "int16") {
t.Errorf("Output contains 'int16' type - should be converted to 'smallint'\nFull output:\n%s", output)
}
// Verify correct PostgreSQL types are present
if !strings.Contains(output, "text") {
t.Errorf("Output missing 'text' type (converted from 'string')\nFull output:\n%s", output)
}
if !strings.Contains(output, "bigint") {
t.Errorf("Output missing 'bigint' type (converted from 'int64')\nFull output:\n%s", output)
}
if !strings.Contains(output, "smallint") {
t.Errorf("Output missing 'smallint' type (converted from 'int16')\nFull output:\n%s", output)
}
}
func TestPrimaryKeyExistenceCheck(t *testing.T) {
db := models.InitDatabase("testdb")
schema := models.InitSchema("public")
table := models.InitTable("products", "public")
idCol := models.InitColumn("id", "products", "public")
idCol.Type = "integer"
idCol.IsPrimaryKey = true
table.Columns["id"] = idCol
nameCol := models.InitColumn("name", "products", "public")
nameCol.Type = "text"
table.Columns["name"] = nameCol
schema.Tables = append(schema.Tables, table)
db.Schemas = append(db.Schemas, schema)
var buf bytes.Buffer
options := &writers.WriterOptions{}
writer := NewWriter(options)
writer.writer = &buf
err := writer.WriteDatabase(db)
if err != nil {
t.Fatalf("WriteDatabase failed: %v", err)
}
output := buf.String()
t.Logf("Generated SQL:\n%s", output)
// Verify our naming convention is used
if !strings.Contains(output, "pk_public_products") {
t.Errorf("Output missing expected primary key name 'pk_public_products'\nFull output:\n%s", output)
}
// Verify it drops auto-generated primary keys
if !strings.Contains(output, "products_pkey") || !strings.Contains(output, "DROP CONSTRAINT") {
t.Errorf("Output missing logic to drop auto-generated primary key\nFull output:\n%s", output)
}
// Verify it checks for our specific named constraint before adding it
if !strings.Contains(output, "constraint_name = 'pk_public_products'") {
t.Errorf("Output missing check for our named primary key constraint\nFull output:\n%s", output)
}
}
func TestColumnSizeSpecifiers(t *testing.T) {
db := models.InitDatabase("testdb")
schema := models.InitSchema("public")
table := models.InitTable("test_sizes", "public")
// Integer with invalid size specifier - should ignore size
integerCol := models.InitColumn("int_col", "test_sizes", "public")
integerCol.Type = "integer"
integerCol.Length = 32
table.Columns["int_col"] = integerCol
// Bigint with invalid size specifier - should ignore size
bigintCol := models.InitColumn("bigint_col", "test_sizes", "public")
bigintCol.Type = "bigint"
bigintCol.Length = 64
table.Columns["bigint_col"] = bigintCol
// Smallint with invalid size specifier - should ignore size
smallintCol := models.InitColumn("smallint_col", "test_sizes", "public")
smallintCol.Type = "smallint"
smallintCol.Length = 16
table.Columns["smallint_col"] = smallintCol
// Text with length - should convert to varchar
textCol := models.InitColumn("text_col", "test_sizes", "public")
textCol.Type = "text"
textCol.Length = 100
table.Columns["text_col"] = textCol
// Varchar with length - should keep varchar with length
varcharCol := models.InitColumn("varchar_col", "test_sizes", "public")
varcharCol.Type = "varchar"
varcharCol.Length = 50
table.Columns["varchar_col"] = varcharCol
// Decimal with precision and scale - should keep them
decimalCol := models.InitColumn("decimal_col", "test_sizes", "public")
decimalCol.Type = "decimal"
decimalCol.Precision = 19
decimalCol.Scale = 4
table.Columns["decimal_col"] = decimalCol
schema.Tables = append(schema.Tables, table)
db.Schemas = append(db.Schemas, schema)
var buf bytes.Buffer
options := &writers.WriterOptions{}
writer := NewWriter(options)
writer.writer = &buf
err := writer.WriteDatabase(db)
if err != nil {
t.Fatalf("WriteDatabase failed: %v", err)
}
output := buf.String()
t.Logf("Generated SQL:\n%s", output)
// Verify invalid size specifiers are NOT present
invalidPatterns := []string{
"integer(32)",
"bigint(64)",
"smallint(16)",
"text(100)",
}
for _, pattern := range invalidPatterns {
if strings.Contains(output, pattern) {
t.Errorf("Output contains invalid pattern '%s' - PostgreSQL doesn't support this\nFull output:\n%s", pattern, output)
}
}
// Verify valid patterns ARE present
validPatterns := []string{
"integer", // without size
"bigint", // without size
"smallint", // without size
"varchar(100)", // text converted to varchar with length
"varchar(50)", // varchar with length
"decimal(19,4)", // decimal with precision and scale
}
for _, pattern := range validPatterns {
if !strings.Contains(output, pattern) {
t.Errorf("Output missing expected pattern '%s'\nFull output:\n%s", pattern, output)
}
}
}
func TestGenerateAddColumnStatements(t *testing.T) {
// Create a test database with tables that have new columns
db := models.InitDatabase("testdb")
schema := models.InitSchema("public")
// Create a table with columns
table := models.InitTable("users", "public")
// Existing column
idCol := models.InitColumn("id", "users", "public")
idCol.Type = "integer"
idCol.NotNull = true
idCol.Sequence = 1
table.Columns["id"] = idCol
// New column to be added
emailCol := models.InitColumn("email", "users", "public")
emailCol.Type = "varchar"
emailCol.Length = 255
emailCol.NotNull = true
emailCol.Sequence = 2
table.Columns["email"] = emailCol
// New column with default
statusCol := models.InitColumn("status", "users", "public")
statusCol.Type = "text"
statusCol.Default = "active"
statusCol.Sequence = 3
table.Columns["status"] = statusCol
schema.Tables = append(schema.Tables, table)
db.Schemas = append(db.Schemas, schema)
// Create writer
options := &writers.WriterOptions{}
writer := NewWriter(options)
// Generate ADD COLUMN statements
statements, err := writer.GenerateAddColumnsForDatabase(db)
if err != nil {
t.Fatalf("GenerateAddColumnsForDatabase failed: %v", err)
}
// Join all statements to verify content
output := strings.Join(statements, "\n")
t.Logf("Generated ADD COLUMN statements:\n%s", output)
// Verify expected elements
expectedStrings := []string{
"ALTER TABLE public.users ADD COLUMN id integer NOT NULL",
"ALTER TABLE public.users ADD COLUMN email varchar(255) NOT NULL",
"ALTER TABLE public.users ADD COLUMN status text DEFAULT 'active'",
"information_schema.columns",
"table_schema = 'public'",
"table_name = 'users'",
"column_name = 'id'",
"column_name = 'email'",
"column_name = 'status'",
}
for _, expected := range expectedStrings {
if !strings.Contains(output, expected) {
t.Errorf("Output missing expected string: %s\nFull output:\n%s", expected, output)
}
}
// Verify DO blocks are present for conditional adds
doBlockCount := strings.Count(output, "DO $$")
if doBlockCount < 3 {
t.Errorf("Expected at least 3 DO blocks (one per column), got %d", doBlockCount)
}
// Verify IF NOT EXISTS logic
ifNotExistsCount := strings.Count(output, "IF NOT EXISTS")
if ifNotExistsCount < 3 {
t.Errorf("Expected at least 3 IF NOT EXISTS checks (one per column), got %d", ifNotExistsCount)
}
}
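// For reference, the fragments asserted above compose into a guarded
// statement of roughly this shape per column (a sketch reconstructed from
// the test's expectations, not the generator's verbatim output):
//
//	DO $$
//	BEGIN
//		IF NOT EXISTS (
//			SELECT 1 FROM information_schema.columns
//			WHERE table_schema = 'public'
//			  AND table_name = 'users'
//			  AND column_name = 'email'
//		) THEN
//			ALTER TABLE public.users ADD COLUMN email varchar(255) NOT NULL;
//		END IF;
//	END $$;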
func TestWriteAddColumnStatements(t *testing.T) {
// Create a test database
db := models.InitDatabase("testdb")
schema := models.InitSchema("public")
// Create a table with a new column to be added
table := models.InitTable("products", "public")
idCol := models.InitColumn("id", "products", "public")
idCol.Type = "integer"
table.Columns["id"] = idCol
// New nullable column to be added
descCol := models.InitColumn("description", "products", "public")
descCol.Type = "text"
descCol.NotNull = false
table.Columns["description"] = descCol
schema.Tables = append(schema.Tables, table)
db.Schemas = append(db.Schemas, schema)
// Create writer with output to buffer
var buf bytes.Buffer
options := &writers.WriterOptions{}
writer := NewWriter(options)
writer.writer = &buf
// Write ADD COLUMN statements
err := writer.WriteAddColumnStatements(db)
if err != nil {
t.Fatalf("WriteAddColumnStatements failed: %v", err)
}
output := buf.String()
t.Logf("Generated output:\n%s", output)
// Verify output contains expected elements
if !strings.Contains(output, "ALTER TABLE public.products ADD COLUMN id integer") {
t.Errorf("Output missing ADD COLUMN for id\nFull output:\n%s", output)
}
if !strings.Contains(output, "ALTER TABLE public.products ADD COLUMN description text") {
t.Errorf("Output missing ADD COLUMN for description\nFull output:\n%s", output)
}
if !strings.Contains(output, "DO $$") {
t.Errorf("Output missing DO block\nFull output:\n%s", output)
}
}

View File

@@ -4,7 +4,7 @@ The SQL Executor Writer (`sqlexec`) executes SQL scripts from `models.Script` ob
## Features
- **Ordered Execution**: Scripts execute in Priority→Sequence→Name order
- **PostgreSQL Support**: Uses `pgx/v5` driver for robust PostgreSQL connectivity
- **Stop on Error**: Execution halts immediately on first error (default behavior)
- **Progress Reporting**: Prints execution status to stdout
@@ -103,19 +103,40 @@ Scripts are sorted and executed based on:
1. **Priority** (ascending): Lower priority values execute first
2. **Sequence** (ascending): Within same priority, lower sequence values execute first
3. **Name** (ascending): Within same priority and sequence, alphabetical order by name
### Example Execution Order
Given these scripts:
```
Script A: Priority=2, Sequence=1, Name="zebra"
Script B: Priority=1, Sequence=3, Name="script"
Script C: Priority=1, Sequence=1, Name="apple"
Script D: Priority=1, Sequence=1, Name="beta"
Script E: Priority=3, Sequence=1, Name="script"
```
Execution order: **C (apple) → D (beta) → B → A → E**
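The ordering can be reproduced with `sort.Slice` using the same three-level comparison the executor applies. The sketch below is self-contained; the local `script` struct is a stand-in for `models.Script` with just the ordering fields:
```
package main

import (
	"fmt"
	"sort"
)

// script is a stand-in for models.Script with just the ordering fields.
type script struct {
	Label    string
	Name     string
	Priority int
	Sequence uint
}

func main() {
	scripts := []script{
		{Label: "A", Priority: 2, Sequence: 1, Name: "zebra"},
		{Label: "B", Priority: 1, Sequence: 3, Name: "script"},
		{Label: "C", Priority: 1, Sequence: 1, Name: "apple"},
		{Label: "D", Priority: 1, Sequence: 1, Name: "beta"},
		{Label: "E", Priority: 3, Sequence: 1, Name: "script"},
	}
	// Priority first, then Sequence, then Name — all ascending.
	sort.Slice(scripts, func(i, j int) bool {
		if scripts[i].Priority != scripts[j].Priority {
			return scripts[i].Priority < scripts[j].Priority
		}
		if scripts[i].Sequence != scripts[j].Sequence {
			return scripts[i].Sequence < scripts[j].Sequence
		}
		return scripts[i].Name < scripts[j].Name
	})
	for _, s := range scripts {
		fmt.Print(s.Label, " ") // C D B A E
	}
	fmt.Println()
}
```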
### Directory-based Sorting Example
Given these files:
```
1_001_create_schema.sql
1_001_create_users.sql ← Alphabetically before "drop_tables"
1_001_drop_tables.sql
1_002_add_indexes.sql
2_001_constraints.sql
```
Execution order (note alphabetical sorting at same priority/sequence):
```
1_001_create_schema.sql
1_001_create_users.sql
1_001_drop_tables.sql
1_002_add_indexes.sql
2_001_constraints.sql
```
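The file names above follow a `priority_sequence_name.sql` pattern. The sketch below shows one way the sort keys could be derived from such a name; `parseScriptName` is a hypothetical helper for illustration, not the project's actual directory reader:
```
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseScriptName derives Priority, Sequence, and Name from a file name
// like "1_001_create_schema.sql" (hypothetical; shown for illustration).
func parseScriptName(file string) (priority int, sequence uint, name string, err error) {
	base := strings.TrimSuffix(file, ".sql")
	parts := strings.SplitN(base, "_", 3)
	if len(parts) != 3 {
		return 0, 0, "", fmt.Errorf("unexpected script name: %q", file)
	}
	p, err := strconv.Atoi(parts[0])
	if err != nil {
		return 0, 0, "", err
	}
	s, err := strconv.ParseUint(parts[1], 10, 32)
	if err != nil {
		return 0, 0, "", err
	}
	return p, uint(s), parts[2], nil
}

func main() {
	p, s, n, _ := parseScriptName("1_001_create_users.sql")
	fmt.Println(p, s, n) // 1 1 create_users
}
```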
## Output

View File

@@ -23,6 +23,11 @@ func NewWriter(options *writers.WriterOptions) *Writer {
}
}
// Options returns the writer options (useful for reading execution results)
func (w *Writer) Options() *writers.WriterOptions {
return w.options
}
// WriteDatabase executes all scripts from all schemas in the database
func (w *Writer) WriteDatabase(db *models.Database) error {
if db == nil {
@@ -86,20 +91,39 @@ func (w *Writer) WriteTable(table *models.Table) error {
return fmt.Errorf("WriteTable is not supported for SQL script execution")
}
// executeScripts executes scripts in Priority, Sequence, then Name order
func (w *Writer) executeScripts(ctx context.Context, conn *pgx.Conn, scripts []*models.Script) error {
if len(scripts) == 0 {
return nil
}
// Check if we should ignore errors
ignoreErrors := false
if val, ok := w.options.Metadata["ignore_errors"].(bool); ok {
ignoreErrors = val
}
// Track failed scripts and execution counts
var failedScripts []struct {
name string
priority int
sequence uint
err error
}
successCount := 0
totalCount := 0
// Sort scripts by Priority (ascending), Sequence (ascending), then Name (ascending)
sortedScripts := make([]*models.Script, len(scripts))
copy(sortedScripts, scripts)
sort.Slice(sortedScripts, func(i, j int) bool {
if sortedScripts[i].Priority != sortedScripts[j].Priority {
return sortedScripts[i].Priority < sortedScripts[j].Priority
}
if sortedScripts[i].Sequence != sortedScripts[j].Sequence {
return sortedScripts[i].Sequence < sortedScripts[j].Sequence
}
return sortedScripts[i].Name < sortedScripts[j].Name
})
// Execute each script in order
@@ -108,18 +132,49 @@ func (w *Writer) executeScripts(ctx context.Context, conn *pgx.Conn, scripts []*
continue
}
totalCount++
fmt.Printf("Executing script: %s (Priority=%d, Sequence=%d)\n",
script.Name, script.Priority, script.Sequence)
// Execute the SQL script
_, err := conn.Exec(ctx, script.SQL)
if err != nil {
return fmt.Errorf("failed to execute script %s (Priority=%d, Sequence=%d): %w",
if ignoreErrors {
fmt.Printf("⚠ Error executing %s: %v (continuing due to --ignore-errors)\n", script.Name, err)
failedScripts = append(failedScripts, struct {
name string
priority int
sequence uint
err error
}{
name: script.Name,
priority: script.Priority,
sequence: script.Sequence,
err: err,
})
continue
}
return fmt.Errorf("script %s (Priority=%d, Sequence=%d): %w",
script.Name, script.Priority, script.Sequence, err)
}
successCount++
fmt.Printf("✓ Successfully executed: %s\n", script.Name)
}
// Store execution results in metadata for caller
w.options.Metadata["execution_total"] = totalCount
w.options.Metadata["execution_success"] = successCount
w.options.Metadata["execution_failed"] = len(failedScripts)
// Print summary of failed scripts if any
if len(failedScripts) > 0 {
fmt.Printf("\n⚠ Failed Scripts Summary (%d failed):\n", len(failedScripts))
for i, failed := range failedScripts {
fmt.Printf(" %d. %s (Priority=%d, Sequence=%d)\n Error: %v\n",
i+1, failed.name, failed.priority, failed.sequence, failed.err)
}
}
return nil
}
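// Example (illustrative only, not part of the writer): a caller opts into
// ignore-errors mode through the options Metadata map, runs the scripts,
// then reads back the counters stored above. This assumes the caller
// initializes Metadata (e.g. as map[string]any); the key names match the
// ones written by executeScripts.
//
//	options := &writers.WriterOptions{
//		Metadata: map[string]any{"ignore_errors": true},
//	}
//	writer := NewWriter(options)
//	if err := writer.WriteDatabase(db); err != nil {
//		log.Fatalf("execution aborted: %v", err)
//	}
//	meta := writer.Options().Metadata
//	fmt.Printf("%v of %v scripts succeeded, %v failed\n",
//		meta["execution_success"], meta["execution_total"], meta["execution_failed"])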

View File

@@ -99,13 +99,13 @@ func TestWriter_WriteTable(t *testing.T) {
}
}
// TestScriptSorting verifies that scripts are sorted correctly by Priority, Sequence, then Name
func TestScriptSorting(t *testing.T) {
scripts := []*models.Script{
{Name: "script1", Priority: 2, Sequence: 1, SQL: "SELECT 1;"},
{Name: "z_script1", Priority: 2, Sequence: 1, SQL: "SELECT 1;"},
{Name: "script2", Priority: 1, Sequence: 3, SQL: "SELECT 2;"},
{Name: "script3", Priority: 1, Sequence: 1, SQL: "SELECT 3;"},
{Name: "script4", Priority: 1, Sequence: 2, SQL: "SELECT 4;"},
{Name: "a_script3", Priority: 1, Sequence: 1, SQL: "SELECT 3;"},
{Name: "b_script4", Priority: 1, Sequence: 1, SQL: "SELECT 4;"},
{Name: "script5", Priority: 3, Sequence: 1, SQL: "SELECT 5;"},
{Name: "script6", Priority: 2, Sequence: 2, SQL: "SELECT 6;"},
}
@@ -114,23 +114,33 @@ func TestScriptSorting(t *testing.T) {
sortedScripts := make([]*models.Script, len(scripts))
copy(sortedScripts, scripts)
// Sort by Priority, Sequence, then Name (matching executeScripts logic)
for i := 0; i < len(sortedScripts)-1; i++ {
for j := i + 1; j < len(sortedScripts); j++ {
si, sj := sortedScripts[i], sortedScripts[j]
// Compare by priority first
if si.Priority > sj.Priority {
sortedScripts[i], sortedScripts[j] = sortedScripts[j], sortedScripts[i]
} else if si.Priority == sj.Priority {
// If same priority, compare by sequence
if si.Sequence > sj.Sequence {
sortedScripts[i], sortedScripts[j] = sortedScripts[j], sortedScripts[i]
} else if si.Sequence == sj.Sequence {
// If same sequence, compare by name
if si.Name > sj.Name {
sortedScripts[i], sortedScripts[j] = sortedScripts[j], sortedScripts[i]
}
}
}
}
}
// Expected order after sorting (Priority -> Sequence -> Name)
expectedOrder := []string{
"script3", // Priority 1, Sequence 1
"script4", // Priority 1, Sequence 2
"a_script3", // Priority 1, Sequence 1, Name a_script3
"b_script4", // Priority 1, Sequence 1, Name b_script4
"script2", // Priority 1, Sequence 3
"script1", // Priority 2, Sequence 1
"z_script1", // Priority 2, Sequence 1
"script6", // Priority 2, Sequence 2
"script5", // Priority 3, Sequence 1
}
@@ -153,6 +163,13 @@ func TestScriptSorting(t *testing.T) {
t.Errorf("Sequence not ascending at position %d with same priority %d: %d > %d",
i, sortedScripts[i].Priority, sortedScripts[i].Sequence, sortedScripts[i+1].Sequence)
}
// Within same priority and sequence, names should be ascending
if sortedScripts[i].Priority == sortedScripts[i+1].Priority &&
sortedScripts[i].Sequence == sortedScripts[i+1].Sequence &&
sortedScripts[i].Name > sortedScripts[i+1].Name {
t.Errorf("Name not ascending at position %d with same priority/sequence: %s > %s",
i, sortedScripts[i].Name, sortedScripts[i+1].Name)
}
}
}