4 Commits

Author SHA1 Message Date
e7a15c8e4f feat(writer): 🎉 Implement add column statements for schema evolution
All checks were successful
CI / Test (1.24) (push) Successful in 26m24s
CI / Test (1.25) (push) Successful in 26m14s
CI / Lint (push) Successful in 26m30s
CI / Build (push) Successful in 26m41s
Release / Build and Release (push) Successful in 26m29s
Integration Tests / Integration Tests (push) Successful in 26m13s
* Add functionality to generate ALTER TABLE ADD COLUMN statements for existing tables.
* Introduce tests for generating and writing add column statements.
* Enhance schema evolution capabilities when new columns are added.
2026-01-31 19:12:00 +02:00
c36b5ede2b feat(writer): 🎉 Enhance primary key handling and add tests
All checks were successful
CI / Test (1.24) (push) Successful in 26m18s
CI / Test (1.25) (push) Successful in 26m11s
CI / Build (push) Successful in 26m43s
CI / Lint (push) Successful in 26m34s
Release / Build and Release (push) Successful in 26m31s
Integration Tests / Integration Tests (push) Successful in 26m20s
* Implement checks for existing primary keys before adding new ones.
* Drop auto-generated primary keys if they exist.
* Add tests for primary key existence and column size specifiers.
* Improve type conversion handling for PostgreSQL compatibility.
2026-01-31 18:59:32 +02:00
51ab29f8e3 feat(writer): 🎉 Update index naming conventions for consistency
All checks were successful
CI / Test (1.24) (push) Successful in 26m25s
CI / Test (1.25) (push) Successful in 26m17s
CI / Lint (push) Successful in 26m32s
CI / Build (push) Successful in 26m42s
Release / Build and Release (push) Successful in 26m31s
Integration Tests / Integration Tests (push) Successful in 26m24s
* Use SQLName() for primary key constraint naming
* Enhance index name formatting with column suffix
2026-01-31 17:23:18 +02:00
f532fc110c feat(writer): 🎉 Enhance script execution order and add symlink skipping
All checks were successful
CI / Test (1.24) (push) Successful in 26m10s
CI / Test (1.25) (push) Successful in 26m8s
CI / Build (push) Successful in 26m44s
CI / Lint (push) Successful in 26m32s
Integration Tests / Integration Tests (push) Successful in 26m26s
* Update script execution to sort by Priority, Sequence, and Name.
* Add functionality to skip symbolic links during directory scanning.
* Improve documentation to reflect changes in execution order and features.
* Add tests for symlink skipping and ensure correct script sorting.
2026-01-31 16:59:17 +02:00
14 changed files with 948 additions and 87 deletions

1
.gitignore vendored
View File

@@ -47,3 +47,4 @@ dist/
build/ build/
bin/ bin/
tests/integration/failed_statements_example.txt tests/integration/failed_statements_example.txt
test_output.log

View File

@@ -39,8 +39,8 @@ Example filenames (hyphen format):
1-002-create-posts.sql # Priority 1, Sequence 2 1-002-create-posts.sql # Priority 1, Sequence 2
10-10-create-newid.pgsql # Priority 10, Sequence 10 10-10-create-newid.pgsql # Priority 10, Sequence 10
Both formats can be mixed in the same directory. Both formats can be mixed in the same directory and subdirectories.
Scripts are executed in order: Priority (ascending), then Sequence (ascending).`, Scripts are executed in order: Priority (ascending), Sequence (ascending), Name (alphabetical).`,
} }
var scriptsListCmd = &cobra.Command{ var scriptsListCmd = &cobra.Command{
@@ -48,8 +48,8 @@ var scriptsListCmd = &cobra.Command{
Short: "List SQL scripts from a directory", Short: "List SQL scripts from a directory",
Long: `List SQL scripts from a directory and show their execution order. Long: `List SQL scripts from a directory and show their execution order.
The scripts are read from the specified directory and displayed in the order The scripts are read recursively from the specified directory and displayed in the order
they would be executed (Priority ascending, then Sequence ascending). they would be executed: Priority (ascending), then Sequence (ascending), then Name (alphabetical).
Example: Example:
relspec scripts list --dir ./migrations`, relspec scripts list --dir ./migrations`,
@@ -61,10 +61,10 @@ var scriptsExecuteCmd = &cobra.Command{
Short: "Execute SQL scripts against a database", Short: "Execute SQL scripts against a database",
Long: `Execute SQL scripts from a directory against a PostgreSQL database. Long: `Execute SQL scripts from a directory against a PostgreSQL database.
Scripts are executed in order: Priority (ascending), then Sequence (ascending). Scripts are executed in order: Priority (ascending), Sequence (ascending), Name (alphabetical).
Execution stops immediately on the first error. Execution stops immediately on the first error.
The directory is scanned recursively for files matching the patterns: The directory is scanned recursively for all subdirectories and files matching the patterns:
{priority}_{sequence}_{name}.sql or .pgsql (underscore format) {priority}_{sequence}_{name}.sql or .pgsql (underscore format)
{priority}-{sequence}-{name}.sql or .pgsql (hyphen format) {priority}-{sequence}-{name}.sql or .pgsql (hyphen format)
@@ -75,7 +75,7 @@ PostgreSQL Connection String Examples:
postgresql://user:pass@host/dbname?sslmode=require postgresql://user:pass@host/dbname?sslmode=require
Examples: Examples:
# Execute migration scripts # Execute migration scripts from a directory (including subdirectories)
relspec scripts execute --dir ./migrations \ relspec scripts execute --dir ./migrations \
--conn "postgres://user:pass@localhost:5432/mydb" --conn "postgres://user:pass@localhost:5432/mydb"
@@ -149,7 +149,7 @@ func runScriptsList(cmd *cobra.Command, args []string) error {
return nil return nil
} }
// Sort scripts by Priority then Sequence // Sort scripts by Priority, Sequence, then Name
sortedScripts := make([]*struct { sortedScripts := make([]*struct {
name string name string
priority int priority int
@@ -186,7 +186,10 @@ func runScriptsList(cmd *cobra.Command, args []string) error {
if sortedScripts[i].priority != sortedScripts[j].priority { if sortedScripts[i].priority != sortedScripts[j].priority {
return sortedScripts[i].priority < sortedScripts[j].priority return sortedScripts[i].priority < sortedScripts[j].priority
} }
return sortedScripts[i].sequence < sortedScripts[j].sequence if sortedScripts[i].sequence != sortedScripts[j].sequence {
return sortedScripts[i].sequence < sortedScripts[j].sequence
}
return sortedScripts[i].name < sortedScripts[j].name
}) })
fmt.Fprintf(os.Stderr, "Found %d script(s) in execution order:\n\n", len(sortedScripts)) fmt.Fprintf(os.Stderr, "Found %d script(s) in execution order:\n\n", len(sortedScripts))
@@ -242,7 +245,7 @@ func runScriptsExecute(cmd *cobra.Command, args []string) error {
fmt.Fprintf(os.Stderr, " ✓ Found %d script(s)\n\n", len(schema.Scripts)) fmt.Fprintf(os.Stderr, " ✓ Found %d script(s)\n\n", len(schema.Scripts))
// Step 2: Execute scripts // Step 2: Execute scripts
fmt.Fprintf(os.Stderr, "[2/2] Executing scripts in order (Priority → Sequence)...\n\n") fmt.Fprintf(os.Stderr, "[2/2] Executing scripts in order (Priority → Sequence → Name)...\n\n")
writer := sqlexec.NewWriter(&writers.WriterOptions{ writer := sqlexec.NewWriter(&writers.WriterOptions{
Metadata: map[string]any{ Metadata: map[string]any{

View File

@@ -93,6 +93,7 @@ fmt.Printf("Found %d scripts\n", len(schema.Scripts))
## Features ## Features
- **Recursive Directory Scanning**: Automatically scans all subdirectories - **Recursive Directory Scanning**: Automatically scans all subdirectories
- **Symlink Skipping**: Symbolic links are automatically skipped (prevents loops and duplicates)
- **Multiple Extensions**: Supports both `.sql` and `.pgsql` files - **Multiple Extensions**: Supports both `.sql` and `.pgsql` files
- **Flexible Naming**: Extract metadata from filename patterns - **Flexible Naming**: Extract metadata from filename patterns
- **Error Handling**: Validates directory existence and file accessibility - **Error Handling**: Validates directory existence and file accessibility
@@ -153,8 +154,9 @@ go test ./pkg/readers/sqldir/
``` ```
Tests include: Tests include:
- Valid file parsing - Valid file parsing (underscore and hyphen formats)
- Recursive directory scanning - Recursive directory scanning
- Symlink skipping
- Invalid filename handling - Invalid filename handling
- Empty directory handling - Empty directory handling
- Error conditions - Error conditions

View File

@@ -107,11 +107,20 @@ func (r *Reader) readScripts() ([]*models.Script, error) {
return err return err
} }
// Skip directories // Don't process directories as files (WalkDir still descends into them recursively)
if d.IsDir() { if d.IsDir() {
return nil return nil
} }
// Skip symlinks
info, err := d.Info()
if err != nil {
return err
}
if info.Mode()&os.ModeSymlink != 0 {
return nil
}
// Get filename // Get filename
filename := d.Name() filename := d.Name()

View File

@@ -373,3 +373,65 @@ func TestReader_MixedFormat(t *testing.T) {
} }
} }
} }
func TestReader_SkipSymlinks(t *testing.T) {
// Create temporary test directory
tempDir, err := os.MkdirTemp("", "sqldir-test-symlink-*")
if err != nil {
t.Fatalf("Failed to create temp directory: %v", err)
}
defer os.RemoveAll(tempDir)
// Create a real SQL file
realFile := filepath.Join(tempDir, "1_001_real_file.sql")
if err := os.WriteFile(realFile, []byte("SELECT 1;"), 0644); err != nil {
t.Fatalf("Failed to create real file: %v", err)
}
// Create another file to link to
targetFile := filepath.Join(tempDir, "2_001_target.sql")
if err := os.WriteFile(targetFile, []byte("SELECT 2;"), 0644); err != nil {
t.Fatalf("Failed to create target file: %v", err)
}
// Create a symlink to the target file (this should be skipped)
symlinkFile := filepath.Join(tempDir, "3_001_symlink.sql")
if err := os.Symlink(targetFile, symlinkFile); err != nil {
// Skip test on systems that don't support symlinks (e.g., Windows without admin)
t.Skipf("Symlink creation not supported: %v", err)
}
// Create reader
reader := NewReader(&readers.ReaderOptions{
FilePath: tempDir,
})
// Read database
db, err := reader.ReadDatabase()
if err != nil {
t.Fatalf("ReadDatabase failed: %v", err)
}
schema := db.Schemas[0]
// Should only have 2 scripts (real_file and target), symlink should be skipped
if len(schema.Scripts) != 2 {
t.Errorf("Expected 2 scripts (symlink should be skipped), got %d", len(schema.Scripts))
}
// Verify the scripts are the real files, not the symlink
scriptNames := make(map[string]bool)
for _, script := range schema.Scripts {
scriptNames[script.Name] = true
}
if !scriptNames["real_file"] {
t.Error("Expected 'real_file' script to be present")
}
if !scriptNames["target"] {
t.Error("Expected 'target' script to be present")
}
if scriptNames["symlink"] {
t.Error("Symlink script should have been skipped but was found")
}
}

View File

@@ -0,0 +1,217 @@
# PostgreSQL Naming Conventions
Standardized naming rules for all database objects in RelSpec PostgreSQL output.
## Quick Reference
| Object Type | Prefix | Format | Example |
| ----------------- | ----------- | ---------------------------------- | ------------------------ |
| Primary Key | `pk_` | `pk_<schema>_<table>` | `pk_public_users` |
| Foreign Key | `fk_` | `fk_<table>_<referenced_table>` | `fk_posts_users` |
| Unique Constraint | `uk_` | `uk_<table>_<column>` | `uk_users_email` |
| Unique Index | `uidx_` | `uidx_<table>_<column>` | `uidx_users_email` |
| Regular Index | `idx_` | `idx_<table>_<column>` | `idx_posts_user_id` |
| Check Constraint | `chk_` | `chk_<table>_<constraint_purpose>` | `chk_users_age_positive` |
| Sequence | `identity_` | `identity_<table>_<column>` | `identity_users_id` |
| Trigger | `t_` | `t_<purpose>_<table>` | `t_audit_users` |
| Trigger Function | `tf_` | `tf_<purpose>_<table>` | `tf_audit_users` |
## Naming Rules by Object Type
### Primary Keys
**Pattern:** `pk_<schema>_<table>`
- Include schema name to avoid collisions across schemas
- Use lowercase, snake_case format
- Examples:
- `pk_public_users`
- `pk_audit_audit_log`
- `pk_staging_temp_data`
### Foreign Keys
**Pattern:** `fk_<table>_<referenced_table>`
- Reference the table containing the FK followed by the referenced table
- Use lowercase, snake_case format
- Do NOT include column names in standard FK constraints
- Examples:
- `fk_posts_users` (posts.user_id → users.id)
- `fk_comments_posts` (comments.post_id → posts.id)
- `fk_order_items_orders` (order_items.order_id → orders.id)
### Unique Constraints
**Pattern:** `uk_<table>_<column>`
- Use `uk_` prefix strictly for database constraints (CONSTRAINT type)
- Include column name for clarity
- Examples:
- `uk_users_email`
- `uk_users_username`
- `uk_products_sku`
### Unique Indexes
**Pattern:** `uidx_<table>_<column>`
- Use `uidx_` prefix strictly for index type objects
- Distinguished from constraints for clarity and implementation flexibility
- Examples:
- `uidx_users_email`
- `uidx_sessions_token`
- `uidx_api_keys_key`
### Regular Indexes
**Pattern:** `idx_<table>_<column>`
- Standard indexes for query optimization
- Single column: `idx_<table>_<column>`
- Examples:
- `idx_posts_user_id`
- `idx_orders_created_at`
- `idx_users_status`
### Check Constraints
**Pattern:** `chk_<table>_<constraint_purpose>`
- Describe the constraint validation purpose
- Use lowercase, snake_case for the purpose
- Examples:
- `chk_users_age_positive` (CHECK (age > 0))
- `chk_orders_quantity_positive` (CHECK (quantity > 0))
- `chk_products_price_valid` (CHECK (price >= 0))
- `chk_users_status_enum` (CHECK (status IN ('active', 'inactive')))
### Sequences
**Pattern:** `identity_<table>_<column>`
- Used for SERIAL/IDENTITY columns
- Explicitly named for clarity and management
- Examples:
- `identity_users_id`
- `identity_posts_id`
- `identity_transactions_id`
### Triggers
**Pattern:** `t_<purpose>_<table>`
- Include purpose before table name
- Lowercase, snake_case format
- Examples:
- `t_audit_users` (audit trigger on users table)
- `t_update_timestamp_posts` (timestamp update trigger on posts)
- `t_validate_orders` (validation trigger on orders)
### Trigger Functions
**Pattern:** `tf_<purpose>_<table>`
- Pair with trigger naming convention
- Use `tf_` prefix to distinguish from triggers themselves
- Examples:
- `tf_audit_users` (function for t_audit_users)
- `tf_update_timestamp_posts` (function for t_update_timestamp_posts)
- `tf_validate_orders` (function for t_validate_orders)
## Multi-Column Objects
### Composite Primary Keys
**Pattern:** `pk_<schema>_<table>`
- Same as single-column PKs
- Example: `pk_public_order_items` (composite key on order_id + item_id)
### Composite Unique Constraints
**Pattern:** `uk_<table>_<column1>_<column2>_[...]`
- Append all column names in order
- Examples:
- `uk_users_email_domain` (UNIQUE(email, domain))
- `uk_inventory_warehouse_sku` (UNIQUE(warehouse_id, sku))
### Composite Unique Indexes
**Pattern:** `uidx_<table>_<column1>_<column2>_[...]`
- Append all column names in order
- Examples:
- `uidx_users_first_name_last_name` (UNIQUE INDEX on first_name, last_name)
- `uidx_sessions_user_id_device_id` (UNIQUE INDEX on user_id, device_id)
### Composite Regular Indexes
**Pattern:** `idx_<table>_<column1>_<column2>_[...]`
- Append all column names in order
- List columns in typical query filter order
- Examples:
- `idx_orders_user_id_created_at` (filter by user, then sort by created_at)
- `idx_logs_level_timestamp` (filter by level, then by timestamp)
## Special Cases & Conventions
### Audit Trail Tables
- Audit table naming: `<original_table>_audit` or `audit_<original_table>`
- Audit indexes follow standard pattern: `idx_<audit_table>_<column>`
- Examples:
- Users table audit: `users_audit` with `idx_users_audit_tablename`, `idx_users_audit_changedate`
- Posts table audit: `posts_audit` with `idx_posts_audit_tablename`, `idx_posts_audit_changedate`
### Temporal/Versioning Tables
- Use suffix `_history` or `_versions` if needed
- Apply standard naming rules with the full table name
- Examples:
- `idx_users_history_user_id`
- `uk_posts_versions_version_number`
### Schema-Specific Objects
- Always qualify with schema when needed: `pk_<schema>_<table>`
- Multiple schemas allowed: `pk_public_users`, `pk_staging_users`
### Reserved Words & Special Names
- Avoid PostgreSQL reserved keywords in object names
- If column/table names conflict, use quoted identifiers in DDL
- Naming convention rules still apply to the logical name
### Generated/Anonymous Indexes
- If an index lacks explicit naming, default to: `idx_<schema>_<table>`
- Should be replaced with explicit names following standards
- Examples (to be renamed):
- `idx_public_users` → should be `idx_users_<column>`
## Implementation Notes
### Code Generation
- Names are always lowercase in generated SQL
- Underscore separators are required
### Migration Safety
- Do NOT rename objects after creation without explicit migration
- Names should be consistent across all schema versions
- Test generated DDL against PostgreSQL before deployment
### Testing
- Ensure consistency across all table and constraint generation
- Test with reserved words to verify escaping
## Related Documentation
- PostgreSQL Identifier Rules: https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-IDENTIFIERS
- Constraint Documentation: https://www.postgresql.org/docs/current/ddl-constraints.html
- Index Documentation: https://www.postgresql.org/docs/current/indexes.html

View File

@@ -477,7 +477,7 @@ func (w *MigrationWriter) generateIndexScripts(model *models.Schema, current *mo
} }
if len(pkColumns) > 0 { if len(pkColumns) > 0 {
sort.Strings(pkColumns) sort.Strings(pkColumns)
constraintName := fmt.Sprintf("pk_%s_%s", strings.ToLower(model.Name), strings.ToLower(modelTable.Name)) constraintName := fmt.Sprintf("pk_%s_%s", model.SQLName(), modelTable.SQLName())
shouldCreate := true shouldCreate := true
if currentTable != nil { if currentTable != nil {
@@ -753,7 +753,7 @@ func (w *MigrationWriter) generateAuditScripts(schema *models.Schema, auditConfi
} }
// Generate audit function // Generate audit function
funcName := fmt.Sprintf("ft_audit_%s", table.Name) funcName := fmt.Sprintf("tf_audit_%s", table.Name)
funcData := BuildAuditFunctionData(schema.Name, table, pk, config, auditSchema, auditConfig.UserFunction) funcData := BuildAuditFunctionData(schema.Name, table, pk, config, auditSchema, auditConfig.UserFunction)
funcSQL, err := w.executor.ExecuteAuditFunction(funcData) funcSQL, err := w.executor.ExecuteAuditFunction(funcData)

View File

@@ -121,7 +121,7 @@ func TestWriteMigration_WithAudit(t *testing.T) {
} }
// Verify audit function // Verify audit function
if !strings.Contains(output, "CREATE OR REPLACE FUNCTION public.ft_audit_users()") { if !strings.Contains(output, "CREATE OR REPLACE FUNCTION public.tf_audit_users()") {
t.Error("Migration missing audit function") t.Error("Migration missing audit function")
} }
@@ -177,7 +177,7 @@ func TestTemplateExecutor_AuditFunction(t *testing.T) {
data := AuditFunctionData{ data := AuditFunctionData{
SchemaName: "public", SchemaName: "public",
FunctionName: "ft_audit_users", FunctionName: "tf_audit_users",
TableName: "users", TableName: "users",
TablePrefix: "NULL", TablePrefix: "NULL",
PrimaryKey: "id", PrimaryKey: "id",
@@ -202,7 +202,7 @@ func TestTemplateExecutor_AuditFunction(t *testing.T) {
t.Logf("Generated SQL:\n%s", sql) t.Logf("Generated SQL:\n%s", sql)
if !strings.Contains(sql, "CREATE OR REPLACE FUNCTION public.ft_audit_users()") { if !strings.Contains(sql, "CREATE OR REPLACE FUNCTION public.tf_audit_users()") {
t.Error("SQL missing function definition") t.Error("SQL missing function definition")
} }
if !strings.Contains(sql, "IF TG_OP = 'INSERT'") { if !strings.Contains(sql, "IF TG_OP = 'INSERT'") {

View File

@@ -355,7 +355,7 @@ func BuildAuditFunctionData(
auditSchema string, auditSchema string,
userFunction string, userFunction string,
) AuditFunctionData { ) AuditFunctionData {
funcName := fmt.Sprintf("ft_audit_%s", table.Name) funcName := fmt.Sprintf("tf_audit_%s", table.Name)
// Build list of audited columns // Build list of audited columns
auditedColumns := make([]*models.Column, 0) auditedColumns := make([]*models.Column, 0)

View File

@@ -168,6 +168,13 @@ func (w *Writer) GenerateSchemaStatements(schema *models.Schema) ([]string, erro
statements = append(statements, stmts...) statements = append(statements, stmts...)
} }
// Phase 3.5: Add missing columns (for existing tables)
addColStmts, err := w.GenerateAddColumnStatements(schema)
if err != nil {
return nil, fmt.Errorf("failed to generate add column statements: %w", err)
}
statements = append(statements, addColStmts...)
// Phase 4: Primary keys // Phase 4: Primary keys
for _, table := range schema.Tables { for _, table := range schema.Tables {
// First check for explicit PrimaryKeyConstraint // First check for explicit PrimaryKeyConstraint
@@ -179,27 +186,67 @@ func (w *Writer) GenerateSchemaStatements(schema *models.Schema) ([]string, erro
} }
} }
var pkColumns []string
var pkName string
if pkConstraint != nil { if pkConstraint != nil {
stmt := fmt.Sprintf("ALTER TABLE %s.%s ADD CONSTRAINT %s PRIMARY KEY (%s)", pkColumns = pkConstraint.Columns
schema.SQLName(), table.SQLName(), pkConstraint.Name, strings.Join(pkConstraint.Columns, ", ")) pkName = pkConstraint.Name
statements = append(statements, stmt)
} else { } else {
// No explicit constraint, check for columns with IsPrimaryKey = true // No explicit constraint, check for columns with IsPrimaryKey = true
pkColumns := []string{} pkCols := []string{}
for _, col := range table.Columns { for _, col := range table.Columns {
if col.IsPrimaryKey { if col.IsPrimaryKey {
pkColumns = append(pkColumns, col.SQLName()) pkCols = append(pkCols, col.SQLName())
} }
} }
if len(pkColumns) > 0 { if len(pkCols) > 0 {
// Sort for consistent output // Sort for consistent output
sort.Strings(pkColumns) sort.Strings(pkCols)
pkName := fmt.Sprintf("pk_%s_%s", schema.SQLName(), table.SQLName()) pkColumns = pkCols
stmt := fmt.Sprintf("ALTER TABLE %s.%s ADD CONSTRAINT %s PRIMARY KEY (%s)", pkName = fmt.Sprintf("pk_%s_%s", schema.SQLName(), table.SQLName())
schema.SQLName(), table.SQLName(), pkName, strings.Join(pkColumns, ", "))
statements = append(statements, stmt)
} }
} }
if len(pkColumns) > 0 {
// Auto-generated primary key names to check for and drop
autoGenPKNames := []string{
fmt.Sprintf("%s_pkey", table.Name),
fmt.Sprintf("%s_%s_pkey", schema.Name, table.Name),
}
// Wrap in DO block to drop auto-generated PK and add our named PK
stmt := fmt.Sprintf("DO $$\nDECLARE\n"+
" auto_pk_name text;\n"+
"BEGIN\n"+
" -- Drop auto-generated primary key if it exists\n"+
" SELECT constraint_name INTO auto_pk_name\n"+
" FROM information_schema.table_constraints\n"+
" WHERE table_schema = '%s'\n"+
" AND table_name = '%s'\n"+
" AND constraint_type = 'PRIMARY KEY'\n"+
" AND constraint_name IN (%s);\n"+
"\n"+
" IF auto_pk_name IS NOT NULL THEN\n"+
" EXECUTE 'ALTER TABLE %s.%s DROP CONSTRAINT ' || quote_ident(auto_pk_name);\n"+
" END IF;\n"+
"\n"+
" -- Add named primary key if it doesn't exist\n"+
" IF NOT EXISTS (\n"+
" SELECT 1 FROM information_schema.table_constraints\n"+
" WHERE table_schema = '%s'\n"+
" AND table_name = '%s'\n"+
" AND constraint_name = '%s'\n"+
" ) THEN\n"+
" ALTER TABLE %s.%s ADD CONSTRAINT %s PRIMARY KEY (%s);\n"+
" END IF;\n"+
"END;\n$$",
schema.Name, table.Name, formatStringList(autoGenPKNames),
schema.SQLName(), table.SQLName(),
schema.Name, table.Name, pkName,
schema.SQLName(), table.SQLName(), pkName, strings.Join(pkColumns, ", "))
statements = append(statements, stmt)
}
} }
// Phase 5: Indexes // Phase 5: Indexes
@@ -270,7 +317,18 @@ func (w *Writer) GenerateSchemaStatements(schema *models.Schema) ([]string, erro
onUpdate = "NO ACTION" onUpdate = "NO ACTION"
} }
stmt := fmt.Sprintf("ALTER TABLE %s.%s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s.%s(%s) ON DELETE %s ON UPDATE %s", // Wrap in DO block to check for existing constraint
stmt := fmt.Sprintf("DO $$\nBEGIN\n"+
" IF NOT EXISTS (\n"+
" SELECT 1 FROM information_schema.table_constraints\n"+
" WHERE table_schema = '%s'\n"+
" AND table_name = '%s'\n"+
" AND constraint_name = '%s'\n"+
" ) THEN\n"+
" ALTER TABLE %s.%s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s.%s(%s) ON DELETE %s ON UPDATE %s;\n"+
" END IF;\n"+
"END;\n$$",
schema.Name, table.Name, constraint.Name,
schema.SQLName(), table.SQLName(), constraint.Name, schema.SQLName(), table.SQLName(), constraint.Name,
strings.Join(constraint.Columns, ", "), strings.Join(constraint.Columns, ", "),
strings.ToLower(refSchema), strings.ToLower(constraint.ReferencedTable), strings.ToLower(refSchema), strings.ToLower(constraint.ReferencedTable),
@@ -300,6 +358,68 @@ func (w *Writer) GenerateSchemaStatements(schema *models.Schema) ([]string, erro
return statements, nil return statements, nil
} }
// GenerateAddColumnStatements generates ALTER TABLE ADD COLUMN statements for existing tables
// This is useful for schema evolution when new columns are added to existing tables
func (w *Writer) GenerateAddColumnStatements(schema *models.Schema) ([]string, error) {
statements := []string{}
statements = append(statements, fmt.Sprintf("-- Add missing columns for schema: %s", schema.Name))
for _, table := range schema.Tables {
// Sort columns by sequence or name for consistent output
columns := make([]*models.Column, 0, len(table.Columns))
for _, col := range table.Columns {
columns = append(columns, col)
}
sort.Slice(columns, func(i, j int) bool {
if columns[i].Sequence != columns[j].Sequence {
return columns[i].Sequence < columns[j].Sequence
}
return columns[i].Name < columns[j].Name
})
for _, col := range columns {
colDef := w.generateColumnDefinition(col)
// Generate DO block that checks if column exists before adding
stmt := fmt.Sprintf("DO $$\nBEGIN\n"+
" IF NOT EXISTS (\n"+
" SELECT 1 FROM information_schema.columns\n"+
" WHERE table_schema = '%s'\n"+
" AND table_name = '%s'\n"+
" AND column_name = '%s'\n"+
" ) THEN\n"+
" ALTER TABLE %s.%s ADD COLUMN %s;\n"+
" END IF;\n"+
"END;\n$$",
schema.Name, table.Name, col.Name,
schema.SQLName(), table.SQLName(), colDef)
statements = append(statements, stmt)
}
}
return statements, nil
}
// GenerateAddColumnsForDatabase generates ALTER TABLE ADD COLUMN statements for the entire database
func (w *Writer) GenerateAddColumnsForDatabase(db *models.Database) ([]string, error) {
statements := []string{}
statements = append(statements, "-- Add missing columns to existing tables")
statements = append(statements, fmt.Sprintf("-- Database: %s", db.Name))
statements = append(statements, "-- Generated by RelSpec")
for _, schema := range db.Schemas {
schemaStatements, err := w.GenerateAddColumnStatements(schema)
if err != nil {
return nil, fmt.Errorf("failed to generate add column statements for schema %s: %w", schema.Name, err)
}
statements = append(statements, schemaStatements...)
}
return statements, nil
}
// generateCreateTableStatement generates CREATE TABLE statement // generateCreateTableStatement generates CREATE TABLE statement
func (w *Writer) generateCreateTableStatement(schema *models.Schema, table *models.Table) ([]string, error) { func (w *Writer) generateCreateTableStatement(schema *models.Schema, table *models.Table) ([]string, error) {
statements := []string{} statements := []string{}
@@ -322,7 +442,7 @@ func (w *Writer) generateCreateTableStatement(schema *models.Schema, table *mode
columnDefs = append(columnDefs, " "+def) columnDefs = append(columnDefs, " "+def)
} }
stmt := fmt.Sprintf("CREATE TABLE %s.%s (\n%s\n)", stmt := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s.%s (\n%s\n)",
schema.SQLName(), table.SQLName(), strings.Join(columnDefs, ",\n")) schema.SQLName(), table.SQLName(), strings.Join(columnDefs, ",\n"))
statements = append(statements, stmt) statements = append(statements, stmt)
@@ -336,14 +456,25 @@ func (w *Writer) generateColumnDefinition(col *models.Column) string {
// Type with length/precision - convert to valid PostgreSQL type // Type with length/precision - convert to valid PostgreSQL type
baseType := pgsql.ConvertSQLType(col.Type) baseType := pgsql.ConvertSQLType(col.Type)
typeStr := baseType typeStr := baseType
// Only add size specifiers for types that support them
if col.Length > 0 && col.Precision == 0 { if col.Length > 0 && col.Precision == 0 {
typeStr = fmt.Sprintf("%s(%d)", baseType, col.Length) if supportsLength(baseType) {
} else if col.Precision > 0 { typeStr = fmt.Sprintf("%s(%d)", baseType, col.Length)
if col.Scale > 0 { } else if isTextTypeWithoutLength(baseType) {
typeStr = fmt.Sprintf("%s(%d,%d)", baseType, col.Precision, col.Scale) // Convert text with length to varchar
} else { typeStr = fmt.Sprintf("varchar(%d)", col.Length)
typeStr = fmt.Sprintf("%s(%d)", baseType, col.Precision)
} }
// For types that don't support length (integer, bigint, etc.), ignore the length
} else if col.Precision > 0 {
if supportsPrecision(baseType) {
if col.Scale > 0 {
typeStr = fmt.Sprintf("%s(%d,%d)", baseType, col.Precision, col.Scale)
} else {
typeStr = fmt.Sprintf("%s(%d)", baseType, col.Precision)
}
}
// For types that don't support precision, ignore it
} }
parts = append(parts, typeStr) parts = append(parts, typeStr)
@@ -396,6 +527,11 @@ func (w *Writer) WriteSchema(schema *models.Schema) error {
return err return err
} }
// Phase 3.5: Add missing columns (priority 120)
if err := w.writeAddColumns(schema); err != nil {
return err
}
// Phase 4: Create primary keys (priority 160) // Phase 4: Create primary keys (priority 160)
if err := w.writePrimaryKeys(schema); err != nil { if err := w.writePrimaryKeys(schema); err != nil {
return err return err
@@ -437,6 +573,44 @@ func (w *Writer) WriteTable(table *models.Table) error {
return w.WriteSchema(schema) return w.WriteSchema(schema)
} }
// WriteAddColumnStatements writes ALTER TABLE ADD COLUMN statements for a database
// This is used for schema evolution/migration when new columns are added
func (w *Writer) WriteAddColumnStatements(db *models.Database) error {
var writer io.Writer
var file *os.File
var err error
// Use existing writer if already set (for testing)
if w.writer != nil {
writer = w.writer
} else if w.options.OutputPath != "" {
// Determine output destination
file, err = os.Create(w.options.OutputPath)
if err != nil {
return fmt.Errorf("failed to create output file: %w", err)
}
defer file.Close()
writer = file
} else {
writer = os.Stdout
}
w.writer = writer
// Generate statements
statements, err := w.GenerateAddColumnsForDatabase(db)
if err != nil {
return err
}
// Write each statement
for _, stmt := range statements {
fmt.Fprintf(w.writer, "%s;\n\n", stmt)
}
return nil
}
// writeCreateSchema generates CREATE SCHEMA statement // writeCreateSchema generates CREATE SCHEMA statement
func (w *Writer) writeCreateSchema(schema *models.Schema) error { func (w *Writer) writeCreateSchema(schema *models.Schema) error {
if schema.Name == "public" { if schema.Name == "public" {
@@ -490,15 +664,8 @@ func (w *Writer) writeCreateTables(schema *models.Schema) error {
columnDefs := make([]string, 0, len(columns)) columnDefs := make([]string, 0, len(columns))
for _, col := range columns { for _, col := range columns {
colDef := fmt.Sprintf(" %s %s", col.SQLName(), pgsql.ConvertSQLType(col.Type)) // Use generateColumnDefinition to properly handle type, length, precision, and defaults
colDef := " " + w.generateColumnDefinition(col)
// Add default value if present
if col.Default != nil && col.Default != "" {
// Strip backticks - DBML uses them for SQL expressions but PostgreSQL doesn't
defaultVal := fmt.Sprintf("%v", col.Default)
colDef += fmt.Sprintf(" DEFAULT %s", stripBackticks(defaultVal))
}
columnDefs = append(columnDefs, colDef) columnDefs = append(columnDefs, colDef)
} }
@@ -509,6 +676,35 @@ func (w *Writer) writeCreateTables(schema *models.Schema) error {
return nil return nil
} }
// writeAddColumns generates ALTER TABLE ADD COLUMN statements for missing columns.
// For every column of every table in the schema it emits a PL/pgSQL DO block
// that first consults information_schema.columns and only issues the
// ALTER TABLE when the column is absent, so the generated script can be
// re-run against a database that already has some of the columns.
//
// NOTE(review): schema/table/column names are interpolated into the quoted
// SQL literals as-is; a name containing a single quote would break the
// generated script — presumably identifiers are validated upstream (TODO confirm).
func (w *Writer) writeAddColumns(schema *models.Schema) error {
	fmt.Fprintf(w.writer, "-- Add missing columns for schema: %s\n", schema.Name)
	for _, table := range schema.Tables {
		// Sort columns by sequence or name for consistent output
		columns := getSortedColumns(table.Columns)
		for _, col := range columns {
			// colDef is the full column definition (name, type with any
			// length/precision, plus NOT NULL / DEFAULT clauses).
			colDef := w.generateColumnDefinition(col)
			// Generate DO block that checks if column exists before adding
			fmt.Fprintf(w.writer, "DO $$\nBEGIN\n")
			fmt.Fprintf(w.writer, "  IF NOT EXISTS (\n")
			fmt.Fprintf(w.writer, "    SELECT 1 FROM information_schema.columns\n")
			fmt.Fprintf(w.writer, "    WHERE table_schema = '%s'\n", schema.Name)
			fmt.Fprintf(w.writer, "      AND table_name = '%s'\n", table.Name)
			fmt.Fprintf(w.writer, "      AND column_name = '%s'\n", col.Name)
			fmt.Fprintf(w.writer, "  ) THEN\n")
			fmt.Fprintf(w.writer, "    ALTER TABLE %s.%s ADD COLUMN %s;\n",
				schema.SQLName(), table.SQLName(), colDef)
			fmt.Fprintf(w.writer, "  END IF;\n")
			fmt.Fprintf(w.writer, "END;\n$$;\n\n")
		}
	}
	return nil
}
// writePrimaryKeys generates ALTER TABLE statements for primary keys // writePrimaryKeys generates ALTER TABLE statements for primary keys
func (w *Writer) writePrimaryKeys(schema *models.Schema) error { func (w *Writer) writePrimaryKeys(schema *models.Schema) error {
fmt.Fprintf(w.writer, "-- Primary keys for schema: %s\n", schema.Name) fmt.Fprintf(w.writer, "-- Primary keys for schema: %s\n", schema.Name)
@@ -550,7 +746,32 @@ func (w *Writer) writePrimaryKeys(schema *models.Schema) error {
continue continue
} }
fmt.Fprintf(w.writer, "DO $$\nBEGIN\n") // Auto-generated primary key names to check for and drop
autoGenPKNames := []string{
fmt.Sprintf("%s_pkey", table.Name),
fmt.Sprintf("%s_%s_pkey", schema.Name, table.Name),
}
fmt.Fprintf(w.writer, "DO $$\nDECLARE\n")
fmt.Fprintf(w.writer, " auto_pk_name text;\nBEGIN\n")
// Check for and drop auto-generated primary keys
fmt.Fprintf(w.writer, " -- Drop auto-generated primary key if it exists\n")
fmt.Fprintf(w.writer, " SELECT constraint_name INTO auto_pk_name\n")
fmt.Fprintf(w.writer, " FROM information_schema.table_constraints\n")
fmt.Fprintf(w.writer, " WHERE table_schema = '%s'\n", schema.Name)
fmt.Fprintf(w.writer, " AND table_name = '%s'\n", table.Name)
fmt.Fprintf(w.writer, " AND constraint_type = 'PRIMARY KEY'\n")
fmt.Fprintf(w.writer, " AND constraint_name IN (%s);\n", formatStringList(autoGenPKNames))
fmt.Fprintf(w.writer, "\n")
fmt.Fprintf(w.writer, " IF auto_pk_name IS NOT NULL THEN\n")
fmt.Fprintf(w.writer, " EXECUTE 'ALTER TABLE %s.%s DROP CONSTRAINT ' || quote_ident(auto_pk_name);\n",
schema.SQLName(), table.SQLName())
fmt.Fprintf(w.writer, " END IF;\n")
fmt.Fprintf(w.writer, "\n")
// Add our named primary key if it doesn't exist
fmt.Fprintf(w.writer, " -- Add named primary key if it doesn't exist\n")
fmt.Fprintf(w.writer, " IF NOT EXISTS (\n") fmt.Fprintf(w.writer, " IF NOT EXISTS (\n")
fmt.Fprintf(w.writer, " SELECT 1 FROM information_schema.table_constraints\n") fmt.Fprintf(w.writer, " SELECT 1 FROM information_schema.table_constraints\n")
fmt.Fprintf(w.writer, " WHERE table_schema = '%s'\n", schema.Name) fmt.Fprintf(w.writer, " WHERE table_schema = '%s'\n", schema.Name)
@@ -592,9 +813,10 @@ func (w *Writer) writeIndexes(schema *models.Schema) error {
if indexName == "" { if indexName == "" {
indexType := "idx" indexType := "idx"
if index.Unique { if index.Unique {
indexType = "uk" indexType = "uidx"
} }
indexName = fmt.Sprintf("%s_%s_%s", indexType, schema.SQLName(), table.SQLName()) columnSuffix := strings.Join(index.Columns, "_")
indexName = fmt.Sprintf("%s_%s_%s", indexType, table.SQLName(), strings.ToLower(columnSuffix))
} }
// Build column list with operator class support for GIN indexes // Build column list with operator class support for GIN indexes
@@ -710,13 +932,6 @@ func (w *Writer) writeForeignKeys(schema *models.Schema) error {
onUpdate = strings.ToUpper(fkConstraint.OnUpdate) onUpdate = strings.ToUpper(fkConstraint.OnUpdate)
} }
fmt.Fprintf(w.writer, "ALTER TABLE %s.%s\n", schema.SQLName(), table.SQLName())
fmt.Fprintf(w.writer, " DROP CONSTRAINT IF EXISTS %s;\n", fkName)
fmt.Fprintf(w.writer, "\n")
fmt.Fprintf(w.writer, "ALTER TABLE %s.%s\n", schema.SQLName(), table.SQLName())
fmt.Fprintf(w.writer, " ADD CONSTRAINT %s\n", fkName)
fmt.Fprintf(w.writer, " FOREIGN KEY (%s)\n", strings.Join(sourceColumns, ", "))
// Use constraint's referenced schema/table or relationship's ToSchema/ToTable // Use constraint's referenced schema/table or relationship's ToSchema/ToTable
refSchema := fkConstraint.ReferencedSchema refSchema := fkConstraint.ReferencedSchema
if refSchema == "" { if refSchema == "" {
@@ -727,11 +942,24 @@ func (w *Writer) writeForeignKeys(schema *models.Schema) error {
refTable = rel.ToTable refTable = rel.ToTable
} }
fmt.Fprintf(w.writer, " REFERENCES %s.%s (%s)\n", // Use DO block to check if constraint exists before adding
fmt.Fprintf(w.writer, "DO $$\nBEGIN\n")
fmt.Fprintf(w.writer, " IF NOT EXISTS (\n")
fmt.Fprintf(w.writer, " SELECT 1 FROM information_schema.table_constraints\n")
fmt.Fprintf(w.writer, " WHERE table_schema = '%s'\n", schema.Name)
fmt.Fprintf(w.writer, " AND table_name = '%s'\n", table.Name)
fmt.Fprintf(w.writer, " AND constraint_name = '%s'\n", fkName)
fmt.Fprintf(w.writer, " ) THEN\n")
fmt.Fprintf(w.writer, " ALTER TABLE %s.%s\n", schema.SQLName(), table.SQLName())
fmt.Fprintf(w.writer, " ADD CONSTRAINT %s\n", fkName)
fmt.Fprintf(w.writer, " FOREIGN KEY (%s)\n", strings.Join(sourceColumns, ", "))
fmt.Fprintf(w.writer, " REFERENCES %s.%s (%s)\n",
refSchema, refTable, strings.Join(targetColumns, ", ")) refSchema, refTable, strings.Join(targetColumns, ", "))
fmt.Fprintf(w.writer, " ON DELETE %s\n", onDelete) fmt.Fprintf(w.writer, " ON DELETE %s\n", onDelete)
fmt.Fprintf(w.writer, " ON UPDATE %s\n", onUpdate) fmt.Fprintf(w.writer, " ON UPDATE %s\n", onUpdate)
fmt.Fprintf(w.writer, " DEFERRABLE;\n\n") fmt.Fprintf(w.writer, " DEFERRABLE;\n")
fmt.Fprintf(w.writer, " END IF;\n")
fmt.Fprintf(w.writer, "END;\n$$;\n\n")
} }
} }
@@ -843,6 +1071,44 @@ func isTextType(colType string) bool {
return false return false
} }
// supportsLength reports whether a PostgreSQL type accepts a length
// specifier (e.g. varchar(50)). It matches both a bare type name and a
// name that already carries a parenthesized length.
func supportsLength(colType string) bool {
	// Compare only the part before any "(", case-insensitively.
	base, _, _ := strings.Cut(strings.ToLower(colType), "(")
	switch base {
	case "varchar", "character varying", "char", "character",
		"bit", "bit varying", "varbit":
		return true
	}
	return false
}
// supportsPrecision reports whether a PostgreSQL type accepts a precision
// (and optional scale) specifier, e.g. numeric(19,4) or timestamp(3).
// It matches both a bare type name and one already carrying parentheses.
func supportsPrecision(colType string) bool {
	// Compare only the part before any "(", case-insensitively.
	base, _, _ := strings.Cut(strings.ToLower(colType), "(")
	switch base {
	case "numeric", "decimal", "time", "timestamp", "timestamptz",
		"timestamp with time zone", "timestamp without time zone",
		"time with time zone", "time without time zone", "interval":
		return true
	}
	return false
}
// isTextTypeWithoutLength reports whether the type is exactly "text"
// (case-insensitive); such columns convert to varchar when a length is given.
func isTextTypeWithoutLength(colType string) bool {
	return strings.ToLower(colType) == "text"
}
// formatStringList formats a list of strings as a SQL-safe comma-separated quoted list
func formatStringList(items []string) string {
quoted := make([]string, len(items))
for i, item := range items {
quoted[i] = fmt.Sprintf("'%s'", escapeQuote(item))
}
return strings.Join(quoted, ", ")
}
// extractOperatorClass extracts operator class from index comment/note // extractOperatorClass extracts operator class from index comment/note
// Looks for common operator classes like gin_trgm_ops, gist_trgm_ops, etc. // Looks for common operator classes like gin_trgm_ops, gist_trgm_ops, etc.
func extractOperatorClass(comment string) string { func extractOperatorClass(comment string) string {

View File

@@ -45,11 +45,11 @@ func TestWriteDatabase(t *testing.T) {
// Add unique index // Add unique index
uniqueEmailIndex := &models.Index{ uniqueEmailIndex := &models.Index{
Name: "uk_users_email", Name: "uidx_users_email",
Unique: true, Unique: true,
Columns: []string{"email"}, Columns: []string{"email"},
} }
table.Indexes["uk_users_email"] = uniqueEmailIndex table.Indexes["uidx_users_email"] = uniqueEmailIndex
schema.Tables = append(schema.Tables, table) schema.Tables = append(schema.Tables, table)
db.Schemas = append(db.Schemas, schema) db.Schemas = append(db.Schemas, schema)
@@ -305,3 +305,263 @@ func TestTypeConversion(t *testing.T) {
t.Errorf("Output missing 'smallint' type (converted from 'int16')\nFull output:\n%s", output) t.Errorf("Output missing 'smallint' type (converted from 'int16')\nFull output:\n%s", output)
} }
} }
// TestPrimaryKeyExistenceCheck verifies that the generated DDL drops
// auto-generated primary keys and only adds the conventionally-named
// primary key when it does not already exist.
func TestPrimaryKeyExistenceCheck(t *testing.T) {
	db := models.InitDatabase("testdb")
	schema := models.InitSchema("public")
	table := models.InitTable("products", "public")

	pkCol := models.InitColumn("id", "products", "public")
	pkCol.Type = "integer"
	pkCol.IsPrimaryKey = true
	table.Columns["id"] = pkCol

	plainCol := models.InitColumn("name", "products", "public")
	plainCol.Type = "text"
	table.Columns["name"] = plainCol

	schema.Tables = append(schema.Tables, table)
	db.Schemas = append(db.Schemas, schema)

	var out bytes.Buffer
	w := NewWriter(&writers.WriterOptions{})
	w.writer = &out

	if err := w.WriteDatabase(db); err != nil {
		t.Fatalf("WriteDatabase failed: %v", err)
	}

	output := out.String()
	t.Logf("Generated SQL:\n%s", output)

	// Verify our naming convention is used
	if !strings.Contains(output, "pk_public_products") {
		t.Errorf("Output missing expected primary key name 'pk_public_products'\nFull output:\n%s", output)
	}
	// Verify it drops auto-generated primary keys
	mentionsAutoPK := strings.Contains(output, "products_pkey")
	mentionsDrop := strings.Contains(output, "DROP CONSTRAINT")
	if !mentionsAutoPK || !mentionsDrop {
		t.Errorf("Output missing logic to drop auto-generated primary key\nFull output:\n%s", output)
	}
	// Verify it checks for our specific named constraint before adding it
	if !strings.Contains(output, "constraint_name = 'pk_public_products'") {
		t.Errorf("Output missing check for our named primary key constraint\nFull output:\n%s", output)
	}
}
// TestColumnSizeSpecifiers verifies that size specifiers are emitted only
// for types where PostgreSQL allows them: fixed-width integer sizes are
// dropped, text-with-length becomes varchar, and varchar/decimal keep
// their length and precision/scale.
func TestColumnSizeSpecifiers(t *testing.T) {
	db := models.InitDatabase("testdb")
	schema := models.InitSchema("public")
	table := models.InitTable("test_sizes", "public")

	// Fixed-width integer types carry bogus sizes that must be dropped.
	colInt := models.InitColumn("int_col", "test_sizes", "public")
	colInt.Type = "integer"
	colInt.Length = 32
	table.Columns["int_col"] = colInt

	colBig := models.InitColumn("bigint_col", "test_sizes", "public")
	colBig.Type = "bigint"
	colBig.Length = 64
	table.Columns["bigint_col"] = colBig

	colSmall := models.InitColumn("smallint_col", "test_sizes", "public")
	colSmall.Type = "smallint"
	colSmall.Length = 16
	table.Columns["smallint_col"] = colSmall

	// text with a length should be converted to varchar(length).
	colText := models.InitColumn("text_col", "test_sizes", "public")
	colText.Type = "text"
	colText.Length = 100
	table.Columns["text_col"] = colText

	// varchar keeps its length.
	colVarchar := models.InitColumn("varchar_col", "test_sizes", "public")
	colVarchar.Type = "varchar"
	colVarchar.Length = 50
	table.Columns["varchar_col"] = colVarchar

	// decimal keeps precision and scale.
	colDec := models.InitColumn("decimal_col", "test_sizes", "public")
	colDec.Type = "decimal"
	colDec.Precision = 19
	colDec.Scale = 4
	table.Columns["decimal_col"] = colDec

	schema.Tables = append(schema.Tables, table)
	db.Schemas = append(db.Schemas, schema)

	var out bytes.Buffer
	w := NewWriter(&writers.WriterOptions{})
	w.writer = &out

	if err := w.WriteDatabase(db); err != nil {
		t.Fatalf("WriteDatabase failed: %v", err)
	}

	output := out.String()
	t.Logf("Generated SQL:\n%s", output)

	// Patterns that must not (want=false) or must (want=true) appear.
	// Invalid patterns are listed first so failure ordering matches the
	// original test.
	cases := []struct {
		pattern string
		want    bool
	}{
		{"integer(32)", false},
		{"bigint(64)", false},
		{"smallint(16)", false},
		{"text(100)", false},
		{"integer", true},
		{"bigint", true},
		{"smallint", true},
		{"varchar(100)", true},
		{"varchar(50)", true},
		{"decimal(19,4)", true},
	}
	for _, c := range cases {
		present := strings.Contains(output, c.pattern)
		switch {
		case !c.want && present:
			t.Errorf("Output contains invalid pattern '%s' - PostgreSQL doesn't support this\nFull output:\n%s", c.pattern, output)
		case c.want && !present:
			t.Errorf("Output missing expected pattern '%s'\nFull output:\n%s", c.pattern, output)
		}
	}
}
// TestGenerateAddColumnStatements checks that GenerateAddColumnsForDatabase
// produces one guarded ADD COLUMN block per column, with correct column
// definitions and information_schema existence checks.
func TestGenerateAddColumnStatements(t *testing.T) {
	db := models.InitDatabase("testdb")
	schema := models.InitSchema("public")
	table := models.InitTable("users", "public")

	// id: existing NOT NULL integer column.
	colID := models.InitColumn("id", "users", "public")
	colID.Type = "integer"
	colID.NotNull = true
	colID.Sequence = 1
	table.Columns["id"] = colID

	// email: new NOT NULL varchar(255) column.
	colEmail := models.InitColumn("email", "users", "public")
	colEmail.Type = "varchar"
	colEmail.Length = 255
	colEmail.NotNull = true
	colEmail.Sequence = 2
	table.Columns["email"] = colEmail

	// status: new text column with a default value.
	colStatus := models.InitColumn("status", "users", "public")
	colStatus.Type = "text"
	colStatus.Default = "active"
	colStatus.Sequence = 3
	table.Columns["status"] = colStatus

	schema.Tables = append(schema.Tables, table)
	db.Schemas = append(db.Schemas, schema)

	w := NewWriter(&writers.WriterOptions{})

	statements, err := w.GenerateAddColumnsForDatabase(db)
	if err != nil {
		t.Fatalf("GenerateAddColumnsForDatabase failed: %v", err)
	}

	output := strings.Join(statements, "\n")
	t.Logf("Generated ADD COLUMN statements:\n%s", output)

	for _, want := range []string{
		"ALTER TABLE public.users ADD COLUMN id integer NOT NULL",
		"ALTER TABLE public.users ADD COLUMN email varchar(255) NOT NULL",
		"ALTER TABLE public.users ADD COLUMN status text DEFAULT 'active'",
		"information_schema.columns",
		"table_schema = 'public'",
		"table_name = 'users'",
		"column_name = 'id'",
		"column_name = 'email'",
		"column_name = 'status'",
	} {
		if !strings.Contains(output, want) {
			t.Errorf("Output missing expected string: %s\nFull output:\n%s", want, output)
		}
	}

	// One conditional DO block per column.
	if n := strings.Count(output, "DO $$"); n < 3 {
		t.Errorf("Expected at least 3 DO blocks (one per column), got %d", n)
	}
	if n := strings.Count(output, "IF NOT EXISTS"); n < 3 {
		t.Errorf("Expected at least 3 IF NOT EXISTS checks (one per column), got %d", n)
	}
}
// TestWriteAddColumnStatements checks that WriteAddColumnStatements writes
// guarded ADD COLUMN statements for every column to the configured writer.
func TestWriteAddColumnStatements(t *testing.T) {
	db := models.InitDatabase("testdb")
	schema := models.InitSchema("public")
	table := models.InitTable("products", "public")

	colID := models.InitColumn("id", "products", "public")
	colID.Type = "integer"
	table.Columns["id"] = colID

	// Nullable text column.
	colDesc := models.InitColumn("description", "products", "public")
	colDesc.Type = "text"
	colDesc.NotNull = false
	table.Columns["description"] = colDesc

	schema.Tables = append(schema.Tables, table)
	db.Schemas = append(db.Schemas, schema)

	// Capture output in a buffer instead of a file/stdout.
	var out bytes.Buffer
	w := NewWriter(&writers.WriterOptions{})
	w.writer = &out

	if err := w.WriteAddColumnStatements(db); err != nil {
		t.Fatalf("WriteAddColumnStatements failed: %v", err)
	}

	output := out.String()
	t.Logf("Generated output:\n%s", output)

	if !strings.Contains(output, "ALTER TABLE public.products ADD COLUMN id integer") {
		t.Errorf("Output missing ADD COLUMN for id\nFull output:\n%s", output)
	}
	if !strings.Contains(output, "ALTER TABLE public.products ADD COLUMN description text") {
		t.Errorf("Output missing ADD COLUMN for description\nFull output:\n%s", output)
	}
	if !strings.Contains(output, "DO $$") {
		t.Errorf("Output missing DO block\nFull output:\n%s", output)
	}
}

View File

@@ -4,7 +4,7 @@ The SQL Executor Writer (`sqlexec`) executes SQL scripts from `models.Script` ob
## Features ## Features
- **Ordered Execution**: Scripts execute in Priority→Sequence→Name order
- **PostgreSQL Support**: Uses `pgx/v5` driver for robust PostgreSQL connectivity - **PostgreSQL Support**: Uses `pgx/v5` driver for robust PostgreSQL connectivity
- **Stop on Error**: Execution halts immediately on first error (default behavior) - **Stop on Error**: Execution halts immediately on first error (default behavior)
- **Progress Reporting**: Prints execution status to stdout - **Progress Reporting**: Prints execution status to stdout
@@ -103,19 +103,40 @@ Scripts are sorted and executed based on:
1. **Priority** (ascending): Lower priority values execute first 1. **Priority** (ascending): Lower priority values execute first
2. **Sequence** (ascending): Within same priority, lower sequence values execute first 2. **Sequence** (ascending): Within same priority, lower sequence values execute first
3. **Name** (ascending): Within same priority and sequence, alphabetical order by name
### Example Execution Order ### Example Execution Order
Given these scripts: Given these scripts:
``` ```
Script A: Priority=2, Sequence=1, Name="zebra"
Script B: Priority=1, Sequence=3, Name="script"
Script C: Priority=1, Sequence=1, Name="apple"
Script D: Priority=1, Sequence=1, Name="beta"
Script E: Priority=3, Sequence=1, Name="script"
``` ```
Execution order: **C (apple) → D (beta) → B → A → E**
### Directory-based Sorting Example
Given these files:
```
1_001_create_schema.sql
1_001_create_users.sql ← Alphabetically before "drop_tables"
1_001_drop_tables.sql
1_002_add_indexes.sql
2_001_constraints.sql
```
Execution order (note alphabetical sorting at same priority/sequence):
```
1_001_create_schema.sql
1_001_create_users.sql
1_001_drop_tables.sql
1_002_add_indexes.sql
2_001_constraints.sql
```
## Output ## Output

View File

@@ -86,20 +86,23 @@ func (w *Writer) WriteTable(table *models.Table) error {
return fmt.Errorf("WriteTable is not supported for SQL script execution") return fmt.Errorf("WriteTable is not supported for SQL script execution")
} }
// executeScripts executes scripts in Priority then Sequence order // executeScripts executes scripts in Priority, Sequence, then Name order
func (w *Writer) executeScripts(ctx context.Context, conn *pgx.Conn, scripts []*models.Script) error { func (w *Writer) executeScripts(ctx context.Context, conn *pgx.Conn, scripts []*models.Script) error {
if len(scripts) == 0 { if len(scripts) == 0 {
return nil return nil
} }
// Sort scripts by Priority (ascending) then Sequence (ascending) // Sort scripts by Priority (ascending), Sequence (ascending), then Name (ascending)
sortedScripts := make([]*models.Script, len(scripts)) sortedScripts := make([]*models.Script, len(scripts))
copy(sortedScripts, scripts) copy(sortedScripts, scripts)
sort.Slice(sortedScripts, func(i, j int) bool { sort.Slice(sortedScripts, func(i, j int) bool {
if sortedScripts[i].Priority != sortedScripts[j].Priority { if sortedScripts[i].Priority != sortedScripts[j].Priority {
return sortedScripts[i].Priority < sortedScripts[j].Priority return sortedScripts[i].Priority < sortedScripts[j].Priority
} }
return sortedScripts[i].Sequence < sortedScripts[j].Sequence if sortedScripts[i].Sequence != sortedScripts[j].Sequence {
return sortedScripts[i].Sequence < sortedScripts[j].Sequence
}
return sortedScripts[i].Name < sortedScripts[j].Name
}) })
// Execute each script in order // Execute each script in order

View File

@@ -99,13 +99,13 @@ func TestWriter_WriteTable(t *testing.T) {
} }
} }
// TestScriptSorting verifies that scripts are sorted correctly by Priority then Sequence // TestScriptSorting verifies that scripts are sorted correctly by Priority, Sequence, then Name
func TestScriptSorting(t *testing.T) { func TestScriptSorting(t *testing.T) {
scripts := []*models.Script{ scripts := []*models.Script{
{Name: "script1", Priority: 2, Sequence: 1, SQL: "SELECT 1;"}, {Name: "z_script1", Priority: 2, Sequence: 1, SQL: "SELECT 1;"},
{Name: "script2", Priority: 1, Sequence: 3, SQL: "SELECT 2;"}, {Name: "script2", Priority: 1, Sequence: 3, SQL: "SELECT 2;"},
{Name: "script3", Priority: 1, Sequence: 1, SQL: "SELECT 3;"}, {Name: "a_script3", Priority: 1, Sequence: 1, SQL: "SELECT 3;"},
{Name: "script4", Priority: 1, Sequence: 2, SQL: "SELECT 4;"}, {Name: "b_script4", Priority: 1, Sequence: 1, SQL: "SELECT 4;"},
{Name: "script5", Priority: 3, Sequence: 1, SQL: "SELECT 5;"}, {Name: "script5", Priority: 3, Sequence: 1, SQL: "SELECT 5;"},
{Name: "script6", Priority: 2, Sequence: 2, SQL: "SELECT 6;"}, {Name: "script6", Priority: 2, Sequence: 2, SQL: "SELECT 6;"},
} }
@@ -114,25 +114,35 @@ func TestScriptSorting(t *testing.T) {
sortedScripts := make([]*models.Script, len(scripts)) sortedScripts := make([]*models.Script, len(scripts))
copy(sortedScripts, scripts) copy(sortedScripts, scripts)
// Use the same sorting logic from executeScripts // Sort by Priority, Sequence, then Name (matching executeScripts logic)
for i := 0; i < len(sortedScripts)-1; i++ { for i := 0; i < len(sortedScripts)-1; i++ {
for j := i + 1; j < len(sortedScripts); j++ { for j := i + 1; j < len(sortedScripts); j++ {
if sortedScripts[i].Priority > sortedScripts[j].Priority || si, sj := sortedScripts[i], sortedScripts[j]
(sortedScripts[i].Priority == sortedScripts[j].Priority && // Compare by priority first
sortedScripts[i].Sequence > sortedScripts[j].Sequence) { if si.Priority > sj.Priority {
sortedScripts[i], sortedScripts[j] = sortedScripts[j], sortedScripts[i] sortedScripts[i], sortedScripts[j] = sortedScripts[j], sortedScripts[i]
} else if si.Priority == sj.Priority {
// If same priority, compare by sequence
if si.Sequence > sj.Sequence {
sortedScripts[i], sortedScripts[j] = sortedScripts[j], sortedScripts[i]
} else if si.Sequence == sj.Sequence {
// If same sequence, compare by name
if si.Name > sj.Name {
sortedScripts[i], sortedScripts[j] = sortedScripts[j], sortedScripts[i]
}
}
} }
} }
} }
// Expected order after sorting // Expected order after sorting (Priority -> Sequence -> Name)
expectedOrder := []string{ expectedOrder := []string{
"script3", // Priority 1, Sequence 1 "a_script3", // Priority 1, Sequence 1, Name a_script3
"script4", // Priority 1, Sequence 2 "b_script4", // Priority 1, Sequence 1, Name b_script4
"script2", // Priority 1, Sequence 3 "script2", // Priority 1, Sequence 3
"script1", // Priority 2, Sequence 1 "z_script1", // Priority 2, Sequence 1
"script6", // Priority 2, Sequence 2 "script6", // Priority 2, Sequence 2
"script5", // Priority 3, Sequence 1 "script5", // Priority 3, Sequence 1
} }
for i, expected := range expectedOrder { for i, expected := range expectedOrder {
@@ -153,6 +163,13 @@ func TestScriptSorting(t *testing.T) {
t.Errorf("Sequence not ascending at position %d with same priority %d: %d > %d", t.Errorf("Sequence not ascending at position %d with same priority %d: %d > %d",
i, sortedScripts[i].Priority, sortedScripts[i].Sequence, sortedScripts[i+1].Sequence) i, sortedScripts[i].Priority, sortedScripts[i].Sequence, sortedScripts[i+1].Sequence)
} }
// Within same priority and sequence, names should be ascending
if sortedScripts[i].Priority == sortedScripts[i+1].Priority &&
sortedScripts[i].Sequence == sortedScripts[i+1].Sequence &&
sortedScripts[i].Name > sortedScripts[i+1].Name {
t.Errorf("Name not ascending at position %d with same priority/sequence: %s > %s",
i, sortedScripts[i].Name, sortedScripts[i+1].Name)
}
} }
} }