2 Commits

Author SHA1 Message Date
51ab29f8e3 feat(writer): 🎉 Update index naming conventions for consistency
All checks were successful
* Use SQLName() for primary key constraint naming
* Enhance index name formatting with column suffix
2026-01-31 17:23:18 +02:00
f532fc110c feat(writer): 🎉 Enhance script execution order and add symlink skipping
All checks were successful
* Update script execution to sort by Priority, Sequence, and Name.
* Add functionality to skip symbolic links during directory scanning.
* Improve documentation to reflect changes in execution order and features.
* Add tests for symlink skipping and ensure correct script sorting.
2026-01-31 16:59:17 +02:00
14 changed files with 383 additions and 47 deletions

.gitignore
View File

@@ -47,3 +47,4 @@ dist/
 build/
 bin/
 tests/integration/failed_statements_example.txt
+test_output.log

View File

@@ -39,8 +39,8 @@ Example filenames (hyphen format):
 1-002-create-posts.sql   # Priority 1, Sequence 2
 10-10-create-newid.pgsql # Priority 10, Sequence 10
-Both formats can be mixed in the same directory.
-Scripts are executed in order: Priority (ascending), then Sequence (ascending).`,
+Both formats can be mixed in the same directory and subdirectories.
+Scripts are executed in order: Priority (ascending), Sequence (ascending), Name (alphabetical).`,
 }
 var scriptsListCmd = &cobra.Command{
@@ -48,8 +48,8 @@ var scriptsListCmd = &cobra.Command{
     Short: "List SQL scripts from a directory",
     Long: `List SQL scripts from a directory and show their execution order.
-The scripts are read from the specified directory and displayed in the order
-they would be executed (Priority ascending, then Sequence ascending).
+The scripts are read recursively from the specified directory and displayed in the order
+they would be executed: Priority (ascending), then Sequence (ascending), then Name (alphabetical).
 Example:
   relspec scripts list --dir ./migrations`,
@@ -61,10 +61,10 @@ var scriptsExecuteCmd = &cobra.Command{
     Short: "Execute SQL scripts against a database",
     Long: `Execute SQL scripts from a directory against a PostgreSQL database.
-Scripts are executed in order: Priority (ascending), then Sequence (ascending).
+Scripts are executed in order: Priority (ascending), Sequence (ascending), Name (alphabetical).
 Execution stops immediately on the first error.
-The directory is scanned recursively for files matching the patterns:
+The directory is scanned recursively for all subdirectories and files matching the patterns:
   {priority}_{sequence}_{name}.sql or .pgsql (underscore format)
   {priority}-{sequence}-{name}.sql or .pgsql (hyphen format)
@@ -75,7 +75,7 @@ PostgreSQL Connection String Examples:
   postgresql://user:pass@host/dbname?sslmode=require
 Examples:
-  # Execute migration scripts
+  # Execute migration scripts from a directory (including subdirectories)
   relspec scripts execute --dir ./migrations \
     --conn "postgres://user:pass@localhost:5432/mydb"
@@ -149,7 +149,7 @@ func runScriptsList(cmd *cobra.Command, args []string) error {
         return nil
     }
-    // Sort scripts by Priority then Sequence
+    // Sort scripts by Priority, Sequence, then Name
     sortedScripts := make([]*struct {
         name     string
         priority int
@@ -186,7 +186,10 @@ func runScriptsList(cmd *cobra.Command, args []string) error {
         if sortedScripts[i].priority != sortedScripts[j].priority {
             return sortedScripts[i].priority < sortedScripts[j].priority
         }
-        return sortedScripts[i].sequence < sortedScripts[j].sequence
+        if sortedScripts[i].sequence != sortedScripts[j].sequence {
+            return sortedScripts[i].sequence < sortedScripts[j].sequence
+        }
+        return sortedScripts[i].name < sortedScripts[j].name
     })
     fmt.Fprintf(os.Stderr, "Found %d script(s) in execution order:\n\n", len(sortedScripts))
@@ -242,7 +245,7 @@ func runScriptsExecute(cmd *cobra.Command, args []string) error {
     fmt.Fprintf(os.Stderr, " ✓ Found %d script(s)\n\n", len(schema.Scripts))
     // Step 2: Execute scripts
-    fmt.Fprintf(os.Stderr, "[2/2] Executing scripts in order (Priority → Sequence)...\n\n")
+    fmt.Fprintf(os.Stderr, "[2/2] Executing scripts in order (Priority → Sequence → Name)...\n\n")
     writer := sqlexec.NewWriter(&writers.WriterOptions{
         Metadata: map[string]any{
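As an aside, the `{priority}_{sequence}_{name}` / `{priority}-{sequence}-{name}` patterns described in the help text above can be parsed with a few lines of Go. This is an illustrative sketch only; the function and regexp below are hypothetical and are not the project's sqldir parser:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// scriptPattern matches both documented formats, e.g. "1-002-create-posts.sql"
// and "10_10_create_newid.pgsql".
var scriptPattern = regexp.MustCompile(`^(\d+)[-_](\d+)[-_](.+)\.(sql|pgsql)$`)

// parseScriptName extracts priority, sequence, and name from a filename.
func parseScriptName(filename string) (priority, sequence int, name string, ok bool) {
	m := scriptPattern.FindStringSubmatch(filename)
	if m == nil {
		return 0, 0, "", false
	}
	priority, _ = strconv.Atoi(m[1])
	sequence, _ = strconv.Atoi(m[2])
	return priority, sequence, m[3], true
}

func main() {
	p, s, n, ok := parseScriptName("1-002-create-posts.sql")
	fmt.Println(p, s, n, ok) // 1 2 create-posts true
}
```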

View File

@@ -93,6 +93,7 @@ fmt.Printf("Found %d scripts\n", len(schema.Scripts))
 ## Features
 - **Recursive Directory Scanning**: Automatically scans all subdirectories
+- **Symlink Skipping**: Symbolic links are automatically skipped (prevents loops and duplicates)
 - **Multiple Extensions**: Supports both `.sql` and `.pgsql` files
 - **Flexible Naming**: Extract metadata from filename patterns
 - **Error Handling**: Validates directory existence and file accessibility
@@ -153,8 +154,9 @@ go test ./pkg/readers/sqldir/
 ```
 Tests include:
-- Valid file parsing
+- Valid file parsing (underscore and hyphen formats)
 - Recursive directory scanning
+- Symlink skipping
 - Invalid filename handling
 - Empty directory handling
 - Error conditions
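For context, a minimal usage sketch of the reader exercised by these tests (NewReader with ReaderOptions.FilePath, then ReadDatabase). The import paths are placeholders and the error handling is simplified:

```go
package main

import (
	"fmt"
	"log"

	// Placeholder module path; substitute the project's real import paths.
	"example.com/relspec/pkg/readers"
	"example.com/relspec/pkg/readers/sqldir"
)

func main() {
	// Recursively scan ./migrations; symlinked entries are skipped by the reader.
	reader := sqldir.NewReader(&readers.ReaderOptions{FilePath: "./migrations"})

	db, err := reader.ReadDatabase()
	if err != nil {
		log.Fatalf("reading scripts: %v", err)
	}

	for _, script := range db.Schemas[0].Scripts {
		fmt.Printf("priority=%d sequence=%d name=%s\n", script.Priority, script.Sequence, script.Name)
	}
}
```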

View File

@@ -107,11 +107,20 @@ func (r *Reader) readScripts() ([]*models.Script, error) {
         return err
     }
-    // Skip directories
+    // Don't process directories as files (WalkDir still descends into them recursively)
     if d.IsDir() {
         return nil
     }
+    // Skip symlinks
+    info, err := d.Info()
+    if err != nil {
+        return err
+    }
+    if info.Mode()&os.ModeSymlink != 0 {
+        return nil
+    }
     // Get filename
     filename := d.Name()
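A note on the symlink check: `fs.DirEntry` also exposes the type bits via `Type()`, so an equivalent check is possible without calling `Info()`. A small standalone sketch of that variant (not the code used above) that lists non-symlinked `.sql`/`.pgsql` files:

```go
package main

import (
	"fmt"
	"io/fs"
	"log"
	"path/filepath"
	"strings"
)

func main() {
	// Walk ./migrations and list .sql/.pgsql files, skipping symlinks via the
	// DirEntry type bits (an alternative to the d.Info() check used by the reader).
	err := filepath.WalkDir("./migrations", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		if d.Type()&fs.ModeSymlink != 0 {
			return nil // skip symbolic links without an extra Stat call
		}
		if strings.HasSuffix(d.Name(), ".sql") || strings.HasSuffix(d.Name(), ".pgsql") {
			fmt.Println(path)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```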

View File

@@ -373,3 +373,65 @@ func TestReader_MixedFormat(t *testing.T) {
 		}
 	}
 }
func TestReader_SkipSymlinks(t *testing.T) {
// Create temporary test directory
tempDir, err := os.MkdirTemp("", "sqldir-test-symlink-*")
if err != nil {
t.Fatalf("Failed to create temp directory: %v", err)
}
defer os.RemoveAll(tempDir)
// Create a real SQL file
realFile := filepath.Join(tempDir, "1_001_real_file.sql")
if err := os.WriteFile(realFile, []byte("SELECT 1;"), 0644); err != nil {
t.Fatalf("Failed to create real file: %v", err)
}
// Create another file to link to
targetFile := filepath.Join(tempDir, "2_001_target.sql")
if err := os.WriteFile(targetFile, []byte("SELECT 2;"), 0644); err != nil {
t.Fatalf("Failed to create target file: %v", err)
}
// Create a symlink to the target file (this should be skipped)
symlinkFile := filepath.Join(tempDir, "3_001_symlink.sql")
if err := os.Symlink(targetFile, symlinkFile); err != nil {
// Skip test on systems that don't support symlinks (e.g., Windows without admin)
t.Skipf("Symlink creation not supported: %v", err)
}
// Create reader
reader := NewReader(&readers.ReaderOptions{
FilePath: tempDir,
})
// Read database
db, err := reader.ReadDatabase()
if err != nil {
t.Fatalf("ReadDatabase failed: %v", err)
}
schema := db.Schemas[0]
// Should only have 2 scripts (real_file and target), symlink should be skipped
if len(schema.Scripts) != 2 {
t.Errorf("Expected 2 scripts (symlink should be skipped), got %d", len(schema.Scripts))
}
// Verify the scripts are the real files, not the symlink
scriptNames := make(map[string]bool)
for _, script := range schema.Scripts {
scriptNames[script.Name] = true
}
if !scriptNames["real_file"] {
t.Error("Expected 'real_file' script to be present")
}
if !scriptNames["target"] {
t.Error("Expected 'target' script to be present")
}
if scriptNames["symlink"] {
t.Error("Symlink script should have been skipped but was found")
}
}

View File

@@ -0,0 +1,217 @@
# PostgreSQL Naming Conventions
Standardized naming rules for all database objects in RelSpec PostgreSQL output.
## Quick Reference
| Object Type | Prefix | Format | Example |
| ----------------- | ----------- | ---------------------------------- | ------------------------ |
| Primary Key | `pk_` | `pk_<schema>_<table>` | `pk_public_users` |
| Foreign Key | `fk_` | `fk_<table>_<referenced_table>` | `fk_posts_users` |
| Unique Constraint | `uk_` | `uk_<table>_<column>` | `uk_users_email` |
| Unique Index | `uidx_` | `uidx_<table>_<column>` | `uidx_users_email` |
| Regular Index | `idx_` | `idx_<table>_<column>` | `idx_posts_user_id` |
| Check Constraint | `chk_` | `chk_<table>_<constraint_purpose>` | `chk_users_age_positive` |
| Sequence | `identity_` | `identity_<table>_<column>` | `identity_users_id` |
| Trigger | `t_` | `t_<purpose>_<table>` | `t_audit_users` |
| Trigger Function | `tf_` | `tf_<purpose>_<table>` | `tf_audit_users` |
## Naming Rules by Object Type
### Primary Keys
**Pattern:** `pk_<schema>_<table>`
- Include schema name to avoid collisions across schemas
- Use lowercase, snake_case format
- Examples:
- `pk_public_users`
- `pk_audit_audit_log`
- `pk_staging_temp_data`
### Foreign Keys
**Pattern:** `fk_<table>_<referenced_table>`
- Reference the table containing the FK followed by the referenced table
- Use lowercase, snake_case format
- Do NOT include column names in standard FK constraints
- Examples:
- `fk_posts_users` (posts.user_id → users.id)
- `fk_comments_posts` (comments.post_id → posts.id)
- `fk_order_items_orders` (order_items.order_id → orders.id)
### Unique Constraints
**Pattern:** `uk_<table>_<column>`
- Use `uk_` prefix strictly for database constraints (CONSTRAINT type)
- Include column name for clarity
- Examples:
- `uk_users_email`
- `uk_users_username`
- `uk_products_sku`
### Unique Indexes
**Pattern:** `uidx_<table>_<column>`
- Use `uidx_` prefix strictly for index type objects
- Distinguished from constraints for clarity and implementation flexibility
- Examples:
- `uidx_users_email`
- `uidx_sessions_token`
- `uidx_api_keys_key`
### Regular Indexes
**Pattern:** `idx_<table>_<column>`
- Standard indexes for query optimization
- Single column: `idx_<table>_<column>`
- Examples:
- `idx_posts_user_id`
- `idx_orders_created_at`
- `idx_users_status`
### Check Constraints
**Pattern:** `chk_<table>_<constraint_purpose>`
- Describe the constraint validation purpose
- Use lowercase, snake_case for the purpose
- Examples:
- `chk_users_age_positive` (CHECK (age > 0))
- `chk_orders_quantity_positive` (CHECK (quantity > 0))
- `chk_products_price_valid` (CHECK (price >= 0))
- `chk_users_status_enum` (CHECK (status IN ('active', 'inactive')))
### Sequences
**Pattern:** `identity_<table>_<column>`
- Used for SERIAL/IDENTITY columns
- Explicitly named for clarity and management
- Examples:
- `identity_users_id`
- `identity_posts_id`
- `identity_transactions_id`
### Triggers
**Pattern:** `t_<purpose>_<table>`
- Include purpose before table name
- Lowercase, snake_case format
- Examples:
- `t_audit_users` (audit trigger on users table)
- `t_update_timestamp_posts` (timestamp update trigger on posts)
- `t_validate_orders` (validation trigger on orders)
### Trigger Functions
**Pattern:** `tf_<purpose>_<table>`
- Pair with trigger naming convention
- Use `tf_` prefix to distinguish from triggers themselves
- Examples:
- `tf_audit_users` (function for t_audit_users)
- `tf_update_timestamp_posts` (function for t_update_timestamp_posts)
- `tf_validate_orders` (function for t_validate_orders)
## Multi-Column Objects
### Composite Primary Keys
**Pattern:** `pk_<schema>_<table>`
- Same as single-column PKs
- Example: `pk_public_order_items` (composite key on order_id + item_id)
### Composite Unique Constraints
**Pattern:** `uk_<table>_<column1>_<column2>_[...]`
- Append all column names in order
- Examples:
- `uk_users_email_domain` (UNIQUE(email, domain))
- `uk_inventory_warehouse_sku` (UNIQUE(warehouse_id, sku))
### Composite Unique Indexes
**Pattern:** `uidx_<table>_<column1>_<column2>_[...]`
- Append all column names in order
- Examples:
- `uidx_users_first_name_last_name` (UNIQUE INDEX on first_name, last_name)
- `uidx_sessions_user_id_device_id` (UNIQUE INDEX on user_id, device_id)
### Composite Regular Indexes
**Pattern:** `idx_<table>_<column1>_<column2>_[...]`
- Append all column names in order
- List columns in typical query filter order
- Examples:
- `idx_orders_user_id_created_at` (filter by user, then sort by created_at)
- `idx_logs_level_timestamp` (filter by level, then by timestamp)
## Special Cases & Conventions
### Audit Trail Tables
- Audit table naming: `<original_table>_audit` or `audit_<original_table>`
- Audit indexes follow standard pattern: `idx_<audit_table>_<column>`
- Examples:
- Users table audit: `users_audit` with `idx_users_audit_tablename`, `idx_users_audit_changedate`
- Posts table audit: `posts_audit` with `idx_posts_audit_tablename`, `idx_posts_audit_changedate`
### Temporal/Versioning Tables
- Use suffix `_history` or `_versions` if needed
- Apply standard naming rules with the full table name
- Examples:
- `idx_users_history_user_id`
- `uk_posts_versions_version_number`
### Schema-Specific Objects
- Always qualify with schema when needed: `pk_<schema>_<table>`
- Multiple schemas allowed: `pk_public_users`, `pk_staging_users`
### Reserved Words & Special Names
- Avoid PostgreSQL reserved keywords in object names
- If column/table names conflict, use quoted identifiers in DDL
- Naming convention rules still apply to the logical name
### Generated/Anonymous Indexes
- If an index lacks explicit naming, default to: `idx_<schema>_<table>`
- Should be replaced with explicit names following standards
- Examples (to be renamed):
- `idx_public_users` → should be `idx_users_<column>`
## Implementation Notes
### Code Generation
- Names are always lowercase in generated SQL
- Underscore separators are required
### Migration Safety
- Do NOT rename objects after creation without explicit migration
- Names should be consistent across all schema versions
- Test generated DDL against PostgreSQL before deployment
### Testing
- Ensure consistency across all table and constraint generation
- Test with reserved words to verify escaping
## Related Documentation
- PostgreSQL Identifier Rules: https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-IDENTIFIERS
- Constraint Documentation: https://www.postgresql.org/docs/current/ddl-constraints.html
- Index Documentation: https://www.postgresql.org/docs/current/indexes.html
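To make the conventions above concrete, here is a small Go sketch of name builders in the spirit of the writer changes in this commit (`pk_` from schema and table, `idx_`/`uidx_` with a lowercased column suffix). The helper functions are illustrative only, not the project's API; the real writer derives names from `SQLName()` as shown in the writer diffs below.

```go
package main

import (
	"fmt"
	"strings"
)

// pkName builds a primary key constraint name: pk_<schema>_<table>.
func pkName(schema, table string) string {
	return fmt.Sprintf("pk_%s_%s", strings.ToLower(schema), strings.ToLower(table))
}

// indexName builds idx_/uidx_ names with a column suffix: <prefix>_<table>_<col1>_<col2>...
func indexName(table string, columns []string, unique bool) string {
	prefix := "idx"
	if unique {
		prefix = "uidx"
	}
	suffix := strings.ToLower(strings.Join(columns, "_"))
	return fmt.Sprintf("%s_%s_%s", prefix, strings.ToLower(table), suffix)
}

func main() {
	fmt.Println(pkName("public", "users"))                                      // pk_public_users
	fmt.Println(indexName("posts", []string{"user_id"}, false))                 // idx_posts_user_id
	fmt.Println(indexName("users", []string{"email"}, true))                    // uidx_users_email
	fmt.Println(indexName("orders", []string{"user_id", "created_at"}, false))  // idx_orders_user_id_created_at
}
```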

View File

@@ -477,7 +477,7 @@ func (w *MigrationWriter) generateIndexScripts(model *models.Schema, current *mo
     }
     if len(pkColumns) > 0 {
         sort.Strings(pkColumns)
-        constraintName := fmt.Sprintf("pk_%s_%s", strings.ToLower(model.Name), strings.ToLower(modelTable.Name))
+        constraintName := fmt.Sprintf("pk_%s_%s", model.SQLName(), modelTable.SQLName())
         shouldCreate := true
         if currentTable != nil {
@@ -753,7 +753,7 @@ func (w *MigrationWriter) generateAuditScripts(schema *models.Schema, auditConfi
     }
     // Generate audit function
-    funcName := fmt.Sprintf("ft_audit_%s", table.Name)
+    funcName := fmt.Sprintf("tf_audit_%s", table.Name)
     funcData := BuildAuditFunctionData(schema.Name, table, pk, config, auditSchema, auditConfig.UserFunction)
     funcSQL, err := w.executor.ExecuteAuditFunction(funcData)

View File

@@ -121,7 +121,7 @@ func TestWriteMigration_WithAudit(t *testing.T) {
     }
     // Verify audit function
-    if !strings.Contains(output, "CREATE OR REPLACE FUNCTION public.ft_audit_users()") {
+    if !strings.Contains(output, "CREATE OR REPLACE FUNCTION public.tf_audit_users()") {
         t.Error("Migration missing audit function")
     }
@@ -177,7 +177,7 @@ func TestTemplateExecutor_AuditFunction(t *testing.T) {
     data := AuditFunctionData{
         SchemaName:   "public",
-        FunctionName: "ft_audit_users",
+        FunctionName: "tf_audit_users",
         TableName:    "users",
         TablePrefix:  "NULL",
         PrimaryKey:   "id",
@@ -202,7 +202,7 @@ func TestTemplateExecutor_AuditFunction(t *testing.T) {
     t.Logf("Generated SQL:\n%s", sql)
-    if !strings.Contains(sql, "CREATE OR REPLACE FUNCTION public.ft_audit_users()") {
+    if !strings.Contains(sql, "CREATE OR REPLACE FUNCTION public.tf_audit_users()") {
         t.Error("SQL missing function definition")
     }
     if !strings.Contains(sql, "IF TG_OP = 'INSERT'") {

View File

@@ -355,7 +355,7 @@ func BuildAuditFunctionData(
     auditSchema string,
     userFunction string,
 ) AuditFunctionData {
-    funcName := fmt.Sprintf("ft_audit_%s", table.Name)
+    funcName := fmt.Sprintf("tf_audit_%s", table.Name)
     // Build list of audited columns
     auditedColumns := make([]*models.Column, 0)

View File

@@ -592,9 +592,10 @@ func (w *Writer) writeIndexes(schema *models.Schema) error {
     if indexName == "" {
         indexType := "idx"
         if index.Unique {
-            indexType = "uk"
+            indexType = "uidx"
         }
-        indexName = fmt.Sprintf("%s_%s_%s", indexType, schema.SQLName(), table.SQLName())
+        columnSuffix := strings.Join(index.Columns, "_")
+        indexName = fmt.Sprintf("%s_%s_%s", indexType, table.SQLName(), strings.ToLower(columnSuffix))
     }
     // Build column list with operator class support for GIN indexes

View File

@@ -45,11 +45,11 @@ func TestWriteDatabase(t *testing.T) {
     // Add unique index
     uniqueEmailIndex := &models.Index{
-        Name:    "uk_users_email",
+        Name:    "uidx_users_email",
         Unique:  true,
         Columns: []string{"email"},
     }
-    table.Indexes["uk_users_email"] = uniqueEmailIndex
+    table.Indexes["uidx_users_email"] = uniqueEmailIndex
     schema.Tables = append(schema.Tables, table)
     db.Schemas = append(db.Schemas, schema)

View File

@@ -4,7 +4,7 @@ The SQL Executor Writer (`sqlexec`) executes SQL scripts from `models.Script` ob
 ## Features
-- **Ordered Execution**: Scripts execute in Priority→Sequence order
+- **Ordered Execution**: Scripts execute in Priority→Sequence→Name order
 - **PostgreSQL Support**: Uses `pgx/v5` driver for robust PostgreSQL connectivity
 - **Stop on Error**: Execution halts immediately on first error (default behavior)
 - **Progress Reporting**: Prints execution status to stdout
@@ -103,19 +103,40 @@ Scripts are sorted and executed based on:
 1. **Priority** (ascending): Lower priority values execute first
 2. **Sequence** (ascending): Within same priority, lower sequence values execute first
+3. **Name** (ascending): Within same priority and sequence, alphabetical order by name
 ### Example Execution Order
 Given these scripts:
 ```
-Script A: Priority=2, Sequence=1
-Script B: Priority=1, Sequence=3
-Script C: Priority=1, Sequence=1
-Script D: Priority=1, Sequence=2
-Script E: Priority=3, Sequence=1
+Script A: Priority=2, Sequence=1, Name="zebra"
+Script B: Priority=1, Sequence=3, Name="script"
+Script C: Priority=1, Sequence=1, Name="apple"
+Script D: Priority=1, Sequence=1, Name="beta"
+Script E: Priority=3, Sequence=1, Name="script"
 ```
-Execution order: **C → D → B → A → E**
+Execution order: **C (apple) → D (beta) → B → A → E**
### Directory-based Sorting Example
Given these files:
```
1_001_create_schema.sql
1_001_create_users.sql ← Alphabetically before "drop_tables"
1_001_drop_tables.sql
1_002_add_indexes.sql
2_001_constraints.sql
```
Execution order (note alphabetical sorting at same priority/sequence):
```
1_001_create_schema.sql
1_001_create_users.sql
1_001_drop_tables.sql
1_002_add_indexes.sql
2_001_constraints.sql
```
## Output

View File

@@ -86,20 +86,23 @@ func (w *Writer) WriteTable(table *models.Table) error {
     return fmt.Errorf("WriteTable is not supported for SQL script execution")
 }
-// executeScripts executes scripts in Priority then Sequence order
+// executeScripts executes scripts in Priority, Sequence, then Name order
 func (w *Writer) executeScripts(ctx context.Context, conn *pgx.Conn, scripts []*models.Script) error {
     if len(scripts) == 0 {
         return nil
     }
-    // Sort scripts by Priority (ascending) then Sequence (ascending)
+    // Sort scripts by Priority (ascending), Sequence (ascending), then Name (ascending)
     sortedScripts := make([]*models.Script, len(scripts))
     copy(sortedScripts, scripts)
     sort.Slice(sortedScripts, func(i, j int) bool {
         if sortedScripts[i].Priority != sortedScripts[j].Priority {
             return sortedScripts[i].Priority < sortedScripts[j].Priority
         }
-        return sortedScripts[i].Sequence < sortedScripts[j].Sequence
+        if sortedScripts[i].Sequence != sortedScripts[j].Sequence {
+            return sortedScripts[i].Sequence < sortedScripts[j].Sequence
+        }
+        return sortedScripts[i].Name < sortedScripts[j].Name
     })
     // Execute each script in order

View File

@@ -99,13 +99,13 @@ func TestWriter_WriteTable(t *testing.T) {
     }
 }
-// TestScriptSorting verifies that scripts are sorted correctly by Priority then Sequence
+// TestScriptSorting verifies that scripts are sorted correctly by Priority, Sequence, then Name
 func TestScriptSorting(t *testing.T) {
     scripts := []*models.Script{
-        {Name: "script1", Priority: 2, Sequence: 1, SQL: "SELECT 1;"},
+        {Name: "z_script1", Priority: 2, Sequence: 1, SQL: "SELECT 1;"},
         {Name: "script2", Priority: 1, Sequence: 3, SQL: "SELECT 2;"},
-        {Name: "script3", Priority: 1, Sequence: 1, SQL: "SELECT 3;"},
-        {Name: "script4", Priority: 1, Sequence: 2, SQL: "SELECT 4;"},
+        {Name: "a_script3", Priority: 1, Sequence: 1, SQL: "SELECT 3;"},
+        {Name: "b_script4", Priority: 1, Sequence: 1, SQL: "SELECT 4;"},
         {Name: "script5", Priority: 3, Sequence: 1, SQL: "SELECT 5;"},
         {Name: "script6", Priority: 2, Sequence: 2, SQL: "SELECT 6;"},
     }
@@ -114,25 +114,35 @@ func TestScriptSorting(t *testing.T) {
     sortedScripts := make([]*models.Script, len(scripts))
     copy(sortedScripts, scripts)
-    // Use the same sorting logic from executeScripts
+    // Sort by Priority, Sequence, then Name (matching executeScripts logic)
     for i := 0; i < len(sortedScripts)-1; i++ {
         for j := i + 1; j < len(sortedScripts); j++ {
-            if sortedScripts[i].Priority > sortedScripts[j].Priority ||
-                (sortedScripts[i].Priority == sortedScripts[j].Priority &&
-                    sortedScripts[i].Sequence > sortedScripts[j].Sequence) {
+            si, sj := sortedScripts[i], sortedScripts[j]
+            // Compare by priority first
+            if si.Priority > sj.Priority {
                 sortedScripts[i], sortedScripts[j] = sortedScripts[j], sortedScripts[i]
+            } else if si.Priority == sj.Priority {
+                // If same priority, compare by sequence
+                if si.Sequence > sj.Sequence {
+                    sortedScripts[i], sortedScripts[j] = sortedScripts[j], sortedScripts[i]
+                } else if si.Sequence == sj.Sequence {
+                    // If same sequence, compare by name
+                    if si.Name > sj.Name {
+                        sortedScripts[i], sortedScripts[j] = sortedScripts[j], sortedScripts[i]
+                    }
+                }
             }
         }
     }
-    // Expected order after sorting
+    // Expected order after sorting (Priority -> Sequence -> Name)
     expectedOrder := []string{
-        "script3", // Priority 1, Sequence 1
-        "script4", // Priority 1, Sequence 2
+        "a_script3", // Priority 1, Sequence 1, Name a_script3
+        "b_script4", // Priority 1, Sequence 1, Name b_script4
         "script2", // Priority 1, Sequence 3
-        "script1", // Priority 2, Sequence 1
+        "z_script1", // Priority 2, Sequence 1
         "script6", // Priority 2, Sequence 2
         "script5", // Priority 3, Sequence 1
     }
     for i, expected := range expectedOrder {
@@ -153,6 +163,13 @@ func TestScriptSorting(t *testing.T) {
             t.Errorf("Sequence not ascending at position %d with same priority %d: %d > %d",
                 i, sortedScripts[i].Priority, sortedScripts[i].Sequence, sortedScripts[i+1].Sequence)
         }
+        // Within same priority and sequence, names should be ascending
+        if sortedScripts[i].Priority == sortedScripts[i+1].Priority &&
+            sortedScripts[i].Sequence == sortedScripts[i+1].Sequence &&
+            sortedScripts[i].Name > sortedScripts[i+1].Name {
+            t.Errorf("Name not ascending at position %d with same priority/sequence: %s > %s",
+                i, sortedScripts[i].Name, sortedScripts[i+1].Name)
+        }
     }
} }