Compare commits

21 Commits

| SHA1 |
|---|
| 3c20c3c5d9 |
| a54594e49b |
| cafe6a461f |
| abdb9b4c78 |
| e7a15c8e4f |
| c36b5ede2b |
| 51ab29f8e3 |
| f532fc110c |
| 92dff99725 |
| 283b568adb |
| 122743ee43 |
| 91b6046b9b |
| 6f55505444 |
| e0e7b64c69 |
| 4181cb1fbd |
| 120ffc6a5a |
| b20ad35485 |
| f258f8baeb |
| 6388daba56 |
| f6c3f2b460 |
| 156e655571 |
.github/workflows/integration-tests.yml (vendored): 5 changed lines
@@ -46,6 +46,11 @@ jobs:
       - name: Download dependencies
         run: go mod download

+      - name: Install PostgreSQL client
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y postgresql-client
+
       - name: Initialize test database
         env:
           PGPASSWORD: relspec_test_password
.gitignore (vendored): 1 changed line

@@ -47,3 +47,4 @@ dist/
 build/
 bin/
 tests/integration/failed_statements_example.txt
+test_output.log
@@ -55,6 +55,7 @@ var (
 	mergeSkipSequences bool
 	mergeSkipTables    string // Comma-separated table names to skip
 	mergeVerbose       bool
+	mergeReportPath    string // Path to write merge report
 )

 var mergeCmd = &cobra.Command{

@@ -78,6 +79,12 @@ Examples:
     --source pgsql --source-conn "postgres://user:pass@localhost/source_db" \
     --output json --output-path combined.json

+  # Merge and execute on PostgreSQL database with report
+  relspec merge --target json --target-path base.json \
+    --source json --source-path additional.json \
+    --output pgsql --output-conn "postgres://user:pass@localhost/target_db" \
+    --merge-report merge-report.json
+
   # Merge DBML and YAML, skip relations
   relspec merge --target dbml --target-path schema.dbml \
     --source yaml --source-path tables.yaml \

@@ -115,6 +122,7 @@ func init() {
 	mergeCmd.Flags().BoolVar(&mergeSkipSequences, "skip-sequences", false, "Skip sequences during merge")
 	mergeCmd.Flags().StringVar(&mergeSkipTables, "skip-tables", "", "Comma-separated list of table names to skip during merge")
 	mergeCmd.Flags().BoolVar(&mergeVerbose, "verbose", false, "Show verbose output")
+	mergeCmd.Flags().StringVar(&mergeReportPath, "merge-report", "", "Path to write merge report (JSON format)")
 }

 func runMerge(cmd *cobra.Command, args []string) error {

@@ -229,7 +237,7 @@ func runMerge(cmd *cobra.Command, args []string) error {
 		fmt.Fprintf(os.Stderr, "  Path: %s\n", mergeOutputPath)
 	}

-	err = writeDatabaseForMerge(mergeOutputType, mergeOutputPath, "", targetDB, "Output")
+	err = writeDatabaseForMerge(mergeOutputType, mergeOutputPath, mergeOutputConn, targetDB, "Output")
 	if err != nil {
 		return fmt.Errorf("failed to write output: %w", err)
 	}

@@ -376,7 +384,17 @@ func writeDatabaseForMerge(dbType, filePath, connString string, db *models.Datab
 		}
 		writer = wtypeorm.NewWriter(&writers.WriterOptions{OutputPath: filePath})
 	case "pgsql":
-		writer = wpgsql.NewWriter(&writers.WriterOptions{OutputPath: filePath})
+		writerOpts := &writers.WriterOptions{OutputPath: filePath}
+		if connString != "" {
+			writerOpts.Metadata = map[string]interface{}{
+				"connection_string": connString,
+			}
+			// Add report path if merge report is enabled
+			if mergeReportPath != "" {
+				writerOpts.Metadata["report_path"] = mergeReportPath
+			}
+		}
+		writer = wpgsql.NewWriter(writerOpts)
 	default:
 		return fmt.Errorf("%s: unsupported format '%s'", label, dbType)
 	}
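The pgsql branch above threads the connection string and report path through the writer's free-form `Metadata` map rather than dedicated option fields. A minimal sketch of the consuming side, assuming a writer that inspects those same metadata keys (the `executeOrWrite` helper here is illustrative, not the actual wpgsql implementation):

```go
package main

import "fmt"

// WriterOptions mirrors the options struct used in the diff: an output path
// plus a free-form metadata map that carries optional settings.
type WriterOptions struct {
	OutputPath string
	Metadata   map[string]interface{}
}

// executeOrWrite decides between executing against a live database and
// writing SQL to a file, based on whether a connection string was supplied.
func executeOrWrite(opts *WriterOptions) {
	if conn, ok := opts.Metadata["connection_string"].(string); ok && conn != "" {
		fmt.Println("executing against:", conn)
		if report, ok := opts.Metadata["report_path"].(string); ok && report != "" {
			fmt.Println("writing merge report to:", report)
		}
		return
	}
	fmt.Println("writing SQL to:", opts.OutputPath)
}

func main() {
	executeOrWrite(&WriterOptions{
		OutputPath: "out.sql",
		Metadata: map[string]interface{}{
			"connection_string": "postgres://user:pass@localhost/target_db",
			"report_path":       "merge-report.json",
		},
	})
}
```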
@@ -14,10 +14,11 @@ import (
 )

 var (
-	scriptsDir        string
-	scriptsConn       string
-	scriptsSchemaName string
-	scriptsDBName     string
+	scriptsDir          string
+	scriptsConn         string
+	scriptsSchemaName   string
+	scriptsDBName       string
+	scriptsIgnoreErrors bool
 )

 var scriptsCmd = &cobra.Command{

@@ -39,8 +40,8 @@ Example filenames (hyphen format):
   1-002-create-posts.sql   # Priority 1, Sequence 2
   10-10-create-newid.pgsql # Priority 10, Sequence 10

-Both formats can be mixed in the same directory.
-Scripts are executed in order: Priority (ascending), then Sequence (ascending).`,
+Both formats can be mixed in the same directory and subdirectories.
+Scripts are executed in order: Priority (ascending), Sequence (ascending), Name (alphabetical).`,
 }

 var scriptsListCmd = &cobra.Command{

@@ -48,8 +49,8 @@ var scriptsListCmd = &cobra.Command{
 	Short: "List SQL scripts from a directory",
 	Long: `List SQL scripts from a directory and show their execution order.

-The scripts are read from the specified directory and displayed in the order
-they would be executed (Priority ascending, then Sequence ascending).
+The scripts are read recursively from the specified directory and displayed in the order
+they would be executed: Priority (ascending), then Sequence (ascending), then Name (alphabetical).

Example:
  relspec scripts list --dir ./migrations`,

@@ -61,10 +62,10 @@ var scriptsExecuteCmd = &cobra.Command{
 	Short: "Execute SQL scripts against a database",
 	Long: `Execute SQL scripts from a directory against a PostgreSQL database.

-Scripts are executed in order: Priority (ascending), then Sequence (ascending).
-Execution stops immediately on the first error.
+Scripts are executed in order: Priority (ascending), Sequence (ascending), Name (alphabetical).
+By default, execution stops immediately on the first error. Use --ignore-errors to continue execution.

-The directory is scanned recursively for files matching the patterns:
+The directory is scanned recursively for all subdirectories and files matching the patterns:
  {priority}_{sequence}_{name}.sql or .pgsql (underscore format)
  {priority}-{sequence}-{name}.sql or .pgsql (hyphen format)

@@ -75,7 +76,7 @@ PostgreSQL Connection String Examples:
  postgresql://user:pass@host/dbname?sslmode=require

Examples:
-  # Execute migration scripts
+  # Execute migration scripts from a directory (including subdirectories)
  relspec scripts execute --dir ./migrations \
    --conn "postgres://user:pass@localhost:5432/mydb"

@@ -86,7 +87,12 @@ Examples:

  # Execute with SSL disabled
  relspec scripts execute --dir ./sql \
-    --conn "postgres://user:pass@localhost/db?sslmode=disable"`,
+    --conn "postgres://user:pass@localhost/db?sslmode=disable"
+
+  # Continue executing even if errors occur
+  relspec scripts execute --dir ./migrations \
+    --conn "postgres://localhost/mydb" \
+    --ignore-errors`,
 	RunE: runScriptsExecute,
 }

@@ -105,6 +111,7 @@ func init() {
 	scriptsExecuteCmd.Flags().StringVar(&scriptsConn, "conn", "", "PostgreSQL connection string (required)")
 	scriptsExecuteCmd.Flags().StringVar(&scriptsSchemaName, "schema", "public", "Schema name (optional, default: public)")
 	scriptsExecuteCmd.Flags().StringVar(&scriptsDBName, "database", "database", "Database name (optional, default: database)")
+	scriptsExecuteCmd.Flags().BoolVar(&scriptsIgnoreErrors, "ignore-errors", false, "Continue executing scripts even if errors occur")

 	err = scriptsExecuteCmd.MarkFlagRequired("dir")
 	if err != nil {

@@ -149,7 +156,7 @@ func runScriptsList(cmd *cobra.Command, args []string) error {
 		return nil
 	}

-	// Sort scripts by Priority then Sequence
+	// Sort scripts by Priority, Sequence, then Name
 	sortedScripts := make([]*struct {
 		name     string
 		priority int

@@ -186,7 +193,10 @@ func runScriptsList(cmd *cobra.Command, args []string) error {
 		if sortedScripts[i].priority != sortedScripts[j].priority {
 			return sortedScripts[i].priority < sortedScripts[j].priority
 		}
-		return sortedScripts[i].sequence < sortedScripts[j].sequence
+		if sortedScripts[i].sequence != sortedScripts[j].sequence {
+			return sortedScripts[i].sequence < sortedScripts[j].sequence
+		}
+		return sortedScripts[i].name < sortedScripts[j].name
 	})

 	fmt.Fprintf(os.Stderr, "Found %d script(s) in execution order:\n\n", len(sortedScripts))

@@ -242,22 +252,44 @@ func runScriptsExecute(cmd *cobra.Command, args []string) error {
 	fmt.Fprintf(os.Stderr, "  ✓ Found %d script(s)\n\n", len(schema.Scripts))

 	// Step 2: Execute scripts
-	fmt.Fprintf(os.Stderr, "[2/2] Executing scripts in order (Priority → Sequence)...\n\n")
+	fmt.Fprintf(os.Stderr, "[2/2] Executing scripts in order (Priority → Sequence → Name)...\n\n")

 	writer := sqlexec.NewWriter(&writers.WriterOptions{
 		Metadata: map[string]any{
 			"connection_string": scriptsConn,
+			"ignore_errors":     scriptsIgnoreErrors,
 		},
 	})

 	if err := writer.WriteSchema(schema); err != nil {
 		fmt.Fprintf(os.Stderr, "\n")
-		return fmt.Errorf("execution failed: %w", err)
+		return fmt.Errorf("script execution failed: %w", err)
 	}

+	// Get execution results from writer metadata
+	totalCount := len(schema.Scripts)
+	successCount := totalCount
+	failedCount := 0
+
+	opts := writer.Options()
+	if total, exists := opts.Metadata["execution_total"].(int); exists {
+		totalCount = total
+	}
+	if success, exists := opts.Metadata["execution_success"].(int); exists {
+		successCount = success
+	}
+	if failed, exists := opts.Metadata["execution_failed"].(int); exists {
+		failedCount = failed
+	}
+
 	fmt.Fprintf(os.Stderr, "\n=== Execution Complete ===\n")
 	fmt.Fprintf(os.Stderr, "Completed at: %s\n", getCurrentTimestamp())
-	fmt.Fprintf(os.Stderr, "Successfully executed %d script(s)\n\n", len(schema.Scripts))
+	fmt.Fprintf(os.Stderr, "Total scripts: %d\n", totalCount)
+	fmt.Fprintf(os.Stderr, "Successful: %d\n", successCount)
+	if failedCount > 0 {
+		fmt.Fprintf(os.Stderr, "Failed: %d\n", failedCount)
+	}
+	fmt.Fprintf(os.Stderr, "\n")

 	return nil
 }
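The execution order introduced above (Priority, then Sequence, then Name as the final tie-breaker) can be illustrated with a small self-contained sketch; the filenames are hypothetical, and the comparator mirrors the one in the diff:

```go
package main

import (
	"fmt"
	"sort"
)

// script holds the metadata parsed from a filename such as "1_002_create-posts.sql".
type script struct {
	name     string
	priority int
	sequence int
}

func main() {
	scripts := []script{
		{"b_cleanup", 2, 1},
		{"a_cleanup", 2, 1}, // same priority and sequence: name breaks the tie
		{"create-posts", 1, 2},
		{"create-users", 1, 1},
	}
	sort.Slice(scripts, func(i, j int) bool {
		if scripts[i].priority != scripts[j].priority {
			return scripts[i].priority < scripts[j].priority
		}
		if scripts[i].sequence != scripts[j].sequence {
			return scripts[i].sequence < scripts[j].sequence
		}
		return scripts[i].name < scripts[j].name
	})
	for _, s := range scripts {
		fmt.Printf("%d-%03d %s\n", s.priority, s.sequence, s.name)
	}
	// Execution order: create-users, create-posts, a_cleanup, b_cleanup
}
```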
@@ -4,31 +4,31 @@ import "strings"
|
||||
|
||||
var GoToStdTypes = map[string]string{
|
||||
"bool": "boolean",
|
||||
"int64": "integer",
|
||||
"int64": "bigint",
|
||||
"int": "integer",
|
||||
"int8": "integer",
|
||||
"int16": "integer",
|
||||
"int8": "smallint",
|
||||
"int16": "smallint",
|
||||
"int32": "integer",
|
||||
"uint": "integer",
|
||||
"uint8": "integer",
|
||||
"uint16": "integer",
|
||||
"uint8": "smallint",
|
||||
"uint16": "smallint",
|
||||
"uint32": "integer",
|
||||
"uint64": "integer",
|
||||
"uintptr": "integer",
|
||||
"znullint64": "integer",
|
||||
"uint64": "bigint",
|
||||
"uintptr": "bigint",
|
||||
"znullint64": "bigint",
|
||||
"znullint32": "integer",
|
||||
"znullbyte": "integer",
|
||||
"znullbyte": "smallint",
|
||||
"float64": "double",
|
||||
"float32": "double",
|
||||
"complex64": "double",
|
||||
"complex128": "double",
|
||||
"customfloat64": "double",
|
||||
"string": "string",
|
||||
"Pointer": "integer",
|
||||
"string": "text",
|
||||
"Pointer": "bigint",
|
||||
"[]byte": "blob",
|
||||
"customdate": "string",
|
||||
"customtime": "string",
|
||||
"customtimestamp": "string",
|
||||
"customdate": "date",
|
||||
"customtime": "time",
|
||||
"customtimestamp": "timestamp",
|
||||
"sqlfloat64": "double",
|
||||
"sqlfloat16": "double",
|
||||
"sqluuid": "uuid",
|
||||
@@ -36,9 +36,9 @@ var GoToStdTypes = map[string]string{
|
||||
"sqljson": "json",
|
||||
"sqlint64": "bigint",
|
||||
"sqlint32": "integer",
|
||||
"sqlint16": "integer",
|
||||
"sqlint16": "smallint",
|
||||
"sqlbool": "boolean",
|
||||
"sqlstring": "string",
|
||||
"sqlstring": "text",
|
||||
"nullablejsonb": "jsonb",
|
||||
"nullablejson": "json",
|
||||
"nullableuuid": "uuid",
|
||||
@@ -67,7 +67,7 @@ var GoToPGSQLTypes = map[string]string{
|
||||
"float32": "real",
|
||||
"complex64": "double precision",
|
||||
"complex128": "double precision",
|
||||
"customfloat64": "double precisio",
|
||||
"customfloat64": "double precision",
|
||||
"string": "text",
|
||||
"Pointer": "bigint",
|
||||
"[]byte": "bytea",
|
||||
@@ -81,9 +81,9 @@ var GoToPGSQLTypes = map[string]string{
|
||||
"sqljson": "json",
|
||||
"sqlint64": "bigint",
|
||||
"sqlint32": "integer",
|
||||
"sqlint16": "integer",
|
||||
"sqlint16": "smallint",
|
||||
"sqlbool": "boolean",
|
||||
"sqlstring": "string",
|
||||
"sqlstring": "text",
|
||||
"nullablejsonb": "jsonb",
|
||||
"nullablejson": "json",
|
||||
"nullableuuid": "uuid",
|
||||
|
||||
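These remappings matter for generated DDL: before the change a Go `int64` became a 32-bit `integer` column, and `string` produced the non-standard type name `string`. A quick sketch of the corrected lookups (the map here is trimmed to a few entries; the full maps are in the hunks above):

```go
package main

import "fmt"

// goToStd is a trimmed copy of the corrected GoToStdTypes entries.
var goToStd = map[string]string{
	"int64":  "bigint",   // was "integer": risked overflow for 64-bit values
	"int16":  "smallint", // was "integer": wider than necessary
	"string": "text",     // was "string": not a standard SQL type
}

func main() {
	for _, goType := range []string{"int64", "int16", "string"} {
		fmt.Printf("%-6s -> %s\n", goType, goToStd[goType])
	}
}
```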
@@ -632,6 +632,9 @@ func (r *Reader) parseColumn(fieldName string, fieldType ast.Expr, tag string, s
 		column.Name = parts[0]
 	}

+	// Track if we found explicit nullability markers
+	hasExplicitNullableMarker := false
+
 	// Parse tag attributes
 	for _, part := range parts[1:] {
 		kv := strings.SplitN(part, ":", 2)

@@ -649,6 +652,10 @@ func (r *Reader) parseColumn(fieldName string, fieldType ast.Expr, tag string, s
 			column.IsPrimaryKey = true
 		case "notnull":
 			column.NotNull = true
+			hasExplicitNullableMarker = true
+		case "nullzero":
+			column.NotNull = false
+			hasExplicitNullableMarker = true
 		case "autoincrement":
 			column.AutoIncrement = true
 		case "default":

@@ -664,17 +671,15 @@ func (r *Reader) parseColumn(fieldName string, fieldType ast.Expr, tag string, s

 	// Determine if nullable based on Go type and bun tags
 	// In Bun:
-	// - nullzero tag means the field is nullable (can be NULL in DB)
-	// - absence of nullzero means the field is NOT NULL
-	// - primitive types (int64, bool, string) are NOT NULL by default
-	column.NotNull = true
-	if strings.Contains(bunTag, "nullzero") {
-		column.NotNull = false
-	} else {
+	// - explicit "notnull" tag means NOT NULL
+	// - explicit "nullzero" tag means nullable
+	// - absence of explicit markers: infer from Go type
+	if !hasExplicitNullableMarker {
+		// Infer from Go type if no explicit marker found
 		column.NotNull = !r.isNullableGoType(fieldType)
 	}

 	// Primary keys are always NOT NULL
 	if column.IsPrimaryKey {
 		column.NotNull = true
 	}
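With the explicit-marker tracking above, nullability resolution has three tiers: an explicit `notnull` or `nullzero` tag always wins, otherwise the Go type decides, and a primary key forces NOT NULL last. A minimal standalone sketch of that precedence (the `isNullableGoType` stand-in here is an assumption for illustration, not the reader's real implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// isNullableGoType is a toy stand-in: pointer types and sql.Null* wrappers
// are treated as nullable, plain primitives as NOT NULL.
func isNullableGoType(goType string) bool {
	return strings.HasPrefix(goType, "*") || strings.HasPrefix(goType, "sql.Null")
}

// resolveNotNull applies the precedence used in the diff: explicit tag first,
// then Go-type inference, then the primary-key override.
func resolveNotNull(goType string, hasNotNull, hasNullzero, isPK bool) bool {
	var notNull bool
	switch {
	case hasNotNull:
		notNull = true
	case hasNullzero:
		notNull = false
	default:
		notNull = !isNullableGoType(goType)
	}
	if isPK {
		notNull = true // primary keys are always NOT NULL
	}
	return notNull
}

func main() {
	fmt.Println(resolveNotNull("int64", false, false, false))       // true: primitive
	fmt.Println(resolveNotNull("*string", false, false, false))     // false: pointer
	fmt.Println(resolveNotNull("*string", true, false, false))      // true: explicit notnull
	fmt.Println(resolveNotNull("sql.NullTime", false, false, true)) // true: PK override
}
```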
@@ -4,7 +4,9 @@ import (
 	"bufio"
 	"fmt"
 	"os"
+	"path/filepath"
 	"regexp"
+	"sort"
 	"strings"

 	"git.warky.dev/wdevs/relspecgo/pkg/models"

@@ -24,11 +26,23 @@ func NewReader(options *readers.ReaderOptions) *Reader {
 }

 // ReadDatabase reads and parses DBML input, returning a Database model
+// If FilePath points to a directory, all .dbml files are loaded and merged
 func (r *Reader) ReadDatabase() (*models.Database, error) {
 	if r.options.FilePath == "" {
 		return nil, fmt.Errorf("file path is required for DBML reader")
 	}

+	// Check if path is a directory
+	info, err := os.Stat(r.options.FilePath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to stat path: %w", err)
+	}
+
+	if info.IsDir() {
+		return r.readDirectoryDBML(r.options.FilePath)
+	}
+
+	// Single file - existing logic
 	content, err := os.ReadFile(r.options.FilePath)
 	if err != nil {
 		return nil, fmt.Errorf("failed to read file: %w", err)
@@ -67,15 +81,341 @@ func (r *Reader) ReadTable() (*models.Table, error) {
 	return schema.Tables[0], nil
 }

-// stripQuotes removes surrounding quotes from an identifier
+// readDirectoryDBML processes all .dbml files in directory
+// Returns merged Database model
+func (r *Reader) readDirectoryDBML(dirPath string) (*models.Database, error) {
+	// Discover and sort DBML files
+	files, err := r.discoverDBMLFiles(dirPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to discover DBML files: %w", err)
+	}
+
+	// If no files found, return empty database
+	if len(files) == 0 {
+		db := models.InitDatabase("database")
+		if r.options.Metadata != nil {
+			if name, ok := r.options.Metadata["name"].(string); ok {
+				db.Name = name
+			}
+		}
+		return db, nil
+	}
+
+	// Initialize database (will be merged with files)
+	var db *models.Database
+
+	// Process each file in sorted order
+	for _, filePath := range files {
+		content, err := os.ReadFile(filePath)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read file %s: %w", filePath, err)
+		}
+
+		fileDB, err := r.parseDBML(string(content))
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse file %s: %w", filePath, err)
+		}
+
+		// First file initializes the database
+		if db == nil {
+			db = fileDB
+		} else {
+			// Subsequent files are merged
+			mergeDatabase(db, fileDB)
+		}
+	}
+
+	return db, nil
+}
+
+// splitIdentifier splits a dotted identifier while respecting quotes
+// Handles cases like: "schema.with.dots"."table"."column"
+func splitIdentifier(s string) []string {
+	var parts []string
+	var current strings.Builder
+	inQuote := false
+	quoteChar := byte(0)
+
+	for i := 0; i < len(s); i++ {
+		ch := s[i]
+
+		if !inQuote {
+			switch ch {
+			case '"', '\'':
+				inQuote = true
+				quoteChar = ch
+				current.WriteByte(ch)
+			case '.':
+				if current.Len() > 0 {
+					parts = append(parts, current.String())
+					current.Reset()
+				}
+			default:
+				current.WriteByte(ch)
+			}
+		} else {
+			current.WriteByte(ch)
+			if ch == quoteChar {
+				inQuote = false
+			}
+		}
+	}
+
+	if current.Len() > 0 {
+		parts = append(parts, current.String())
+	}
+
+	return parts
+}
+
+// stripQuotes removes surrounding quotes and comments from an identifier
+func stripQuotes(s string) string {
+	s = strings.TrimSpace(s)
+
+	// Remove DBML comments in brackets (e.g., [note: 'description'])
+	// This handles inline comments like: "table_name" [note: 'comment']
+	commentRegex := regexp.MustCompile(`\s*\[.*?\]\s*`)
+	s = commentRegex.ReplaceAllString(s, "")
+
+	// Trim again after removing comments
+	s = strings.TrimSpace(s)
+
+	// Remove surrounding quotes (double or single)
+	if len(s) >= 2 && ((s[0] == '"' && s[len(s)-1] == '"') || (s[0] == '\'' && s[len(s)-1] == '\'')) {
+		return s[1 : len(s)-1]
+	}
+	return s
+}
+
+// parseFilePrefix extracts numeric prefix from filename
+// Examples: "1_schema.dbml" -> (1, true), "tables.dbml" -> (0, false)
+func parseFilePrefix(filename string) (int, bool) {
+	base := filepath.Base(filename)
+	re := regexp.MustCompile(`^(\d+)[_-]`)
+	matches := re.FindStringSubmatch(base)
+	if len(matches) > 1 {
+		var prefix int
+		_, err := fmt.Sscanf(matches[1], "%d", &prefix)
+		if err == nil {
+			return prefix, true
+		}
+	}
+	return 0, false
+}
+
+// hasCommentedRefs scans file content for commented-out Ref statements
+// Returns true if file contains lines like: // Ref: table.col > other.col
+func hasCommentedRefs(filePath string) (bool, error) {
+	content, err := os.ReadFile(filePath)
+	if err != nil {
+		return false, err
+	}
+
+	scanner := bufio.NewScanner(strings.NewReader(string(content)))
+	commentedRefRegex := regexp.MustCompile(`^\s*//.*Ref:\s+`)
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		if commentedRefRegex.MatchString(line) {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+// discoverDBMLFiles finds all .dbml files in directory and returns them sorted
+func (r *Reader) discoverDBMLFiles(dirPath string) ([]string, error) {
+	pattern := filepath.Join(dirPath, "*.dbml")
+	files, err := filepath.Glob(pattern)
+	if err != nil {
+		return nil, fmt.Errorf("failed to glob .dbml files: %w", err)
+	}
+
+	return sortDBMLFiles(files), nil
+}
+
+// sortDBMLFiles sorts files by:
+// 1. Files without commented refs (by numeric prefix, then alphabetically)
+// 2. Files with commented refs (by numeric prefix, then alphabetically)
+func sortDBMLFiles(files []string) []string {
+	// Create a slice to hold file info for sorting
+	type fileInfo struct {
+		path         string
+		hasCommented bool
+		prefix       int
+		hasPrefix    bool
+		basename     string
+	}
+
+	fileInfos := make([]fileInfo, 0, len(files))
+
+	for _, file := range files {
+		hasCommented, err := hasCommentedRefs(file)
+		if err != nil {
+			// If we can't read the file, treat it as not having commented refs
+			hasCommented = false
+		}
+
+		prefix, hasPrefix := parseFilePrefix(file)
+		basename := filepath.Base(file)
+
+		fileInfos = append(fileInfos, fileInfo{
+			path:         file,
+			hasCommented: hasCommented,
+			prefix:       prefix,
+			hasPrefix:    hasPrefix,
+			basename:     basename,
+		})
+	}
+
+	// Sort by: hasCommented (false first), hasPrefix (true first), prefix, basename
+	sort.Slice(fileInfos, func(i, j int) bool {
+		// First, sort by commented refs (files without commented refs come first)
+		if fileInfos[i].hasCommented != fileInfos[j].hasCommented {
+			return !fileInfos[i].hasCommented
+		}
+
+		// Then by presence of prefix (files with prefix come first)
+		if fileInfos[i].hasPrefix != fileInfos[j].hasPrefix {
+			return fileInfos[i].hasPrefix
+		}
+
+		// If both have prefix, sort by prefix value
+		if fileInfos[i].hasPrefix && fileInfos[j].hasPrefix {
+			if fileInfos[i].prefix != fileInfos[j].prefix {
+				return fileInfos[i].prefix < fileInfos[j].prefix
+			}
+		}
+
+		// Finally, sort alphabetically by basename
+		return fileInfos[i].basename < fileInfos[j].basename
+	})
+
+	// Extract sorted paths
+	sortedFiles := make([]string, len(fileInfos))
+	for i, info := range fileInfos {
+		sortedFiles[i] = info.path
+	}
+
+	return sortedFiles
+}
+
+// mergeTable combines two table definitions
+// Merges: Columns (map), Constraints (map), Indexes (map), Relationships (map)
+// Uses first non-empty Description
+func mergeTable(baseTable, fileTable *models.Table) {
+	// Merge columns (map naturally merges - later keys overwrite)
+	for key, col := range fileTable.Columns {
+		baseTable.Columns[key] = col
+	}
+
+	// Merge constraints
+	for key, constraint := range fileTable.Constraints {
+		baseTable.Constraints[key] = constraint
+	}
+
+	// Merge indexes
+	for key, index := range fileTable.Indexes {
+		baseTable.Indexes[key] = index
+	}
+
+	// Merge relationships
+	for key, rel := range fileTable.Relationships {
+		baseTable.Relationships[key] = rel
+	}
+
+	// Use first non-empty description
+	if baseTable.Description == "" && fileTable.Description != "" {
+		baseTable.Description = fileTable.Description
+	}
+
+	// Merge metadata maps
+	if baseTable.Metadata == nil {
+		baseTable.Metadata = make(map[string]any)
+	}
+	for key, val := range fileTable.Metadata {
+		baseTable.Metadata[key] = val
+	}
+}
+
+// mergeSchema finds or creates schema and merges tables
+func mergeSchema(baseDB *models.Database, fileSchema *models.Schema) {
+	// Find existing schema by name (normalize names by stripping quotes)
+	var existingSchema *models.Schema
+	fileSchemaName := stripQuotes(fileSchema.Name)
+	for _, schema := range baseDB.Schemas {
+		if stripQuotes(schema.Name) == fileSchemaName {
+			existingSchema = schema
+			break
+		}
+	}
+
+	// If schema doesn't exist, add it and return
+	if existingSchema == nil {
+		baseDB.Schemas = append(baseDB.Schemas, fileSchema)
+		return
+	}
+
+	// Merge tables from fileSchema into existingSchema
+	for _, fileTable := range fileSchema.Tables {
+		// Find existing table by name (normalize names by stripping quotes)
+		var existingTable *models.Table
+		fileTableName := stripQuotes(fileTable.Name)
+		for _, table := range existingSchema.Tables {
+			if stripQuotes(table.Name) == fileTableName {
+				existingTable = table
+				break
+			}
+		}
+
+		// If table doesn't exist, add it
+		if existingTable == nil {
+			existingSchema.Tables = append(existingSchema.Tables, fileTable)
+		} else {
+			// Table exists - merge its properties
+			mergeTable(existingTable, fileTable)
+		}
+	}
+
+	// Merge other schema properties
+	existingSchema.Views = append(existingSchema.Views, fileSchema.Views...)
+	existingSchema.Sequences = append(existingSchema.Sequences, fileSchema.Sequences...)
+	existingSchema.Scripts = append(existingSchema.Scripts, fileSchema.Scripts...)
+
+	// Merge permissions
+	if existingSchema.Permissions == nil {
+		existingSchema.Permissions = make(map[string]string)
+	}
+	for key, val := range fileSchema.Permissions {
+		existingSchema.Permissions[key] = val
+	}
+
+	// Merge metadata
+	if existingSchema.Metadata == nil {
+		existingSchema.Metadata = make(map[string]any)
+	}
+	for key, val := range fileSchema.Metadata {
+		existingSchema.Metadata[key] = val
+	}
+}
+
+// mergeDatabase merges schemas from fileDB into baseDB
+func mergeDatabase(baseDB, fileDB *models.Database) {
+	// Merge each schema from fileDB
+	for _, fileSchema := range fileDB.Schemas {
+		mergeSchema(baseDB, fileSchema)
+	}
+
+	// Merge domains
+	baseDB.Domains = append(baseDB.Domains, fileDB.Domains...)
+
+	// Use first non-empty description
+	if baseDB.Description == "" && fileDB.Description != "" {
+		baseDB.Description = fileDB.Description
+	}
+}

 // parseDBML parses DBML content and returns a Database model
 func (r *Reader) parseDBML(content string) (*models.Database, error) {
 	db := models.InitDatabase("database")
@@ -109,7 +449,9 @@ func (r *Reader) parseDBML(content string) (*models.Database, error) {
 		// Parse Table definition
 		if matches := tableRegex.FindStringSubmatch(line); matches != nil {
 			tableName := matches[1]
-			parts := strings.Split(tableName, ".")
+			// Strip comments/notes before parsing to avoid dots in notes
+			tableName = strings.TrimSpace(regexp.MustCompile(`\s*\[.*?\]\s*`).ReplaceAllString(tableName, ""))
+			parts := splitIdentifier(tableName)

 			if len(parts) == 2 {
 				currentSchema = stripQuotes(parts[0])

@@ -262,7 +604,7 @@ func (r *Reader) parseColumn(line, tableName, schemaName string) (*models.Column
 		} else if attr == "unique" {
 			// Create a unique constraint
 			uniqueConstraint := models.InitConstraint(
-				fmt.Sprintf("uq_%s", columnName),
+				fmt.Sprintf("uq_%s_%s", tableName, columnName),
 				models.UniqueConstraint,
 			)
 			uniqueConstraint.Schema = schemaName

@@ -287,10 +629,10 @@ func (r *Reader) parseColumn(line, tableName, schemaName string) (*models.Column
 		refOp := strings.TrimSpace(refStr)
 		var isReverse bool
 		if strings.HasPrefix(refOp, "<") {
-			isReverse = column.IsPrimaryKey // < on PK means "is referenced by" (reverse)
-		} else if strings.HasPrefix(refOp, ">") {
-			isReverse = !column.IsPrimaryKey // > on FK means reverse
+			// < means "is referenced by" - only makes sense on PK columns
+			isReverse = column.IsPrimaryKey
 		}
+		// > means "references" - always a forward FK, never reverse

 		constraint = r.parseRef(refStr)
 		if constraint != nil {

@@ -310,8 +652,8 @@ func (r *Reader) parseColumn(line, tableName, schemaName string) (*models.Column
 				constraint.Table = tableName
 				constraint.Columns = []string{columnName}
 			}
-			// Generate short constraint name based on the column
-			constraint.Name = fmt.Sprintf("fk_%s", constraint.Columns[0])
+			// Generate constraint name based on table and columns
+			constraint.Name = fmt.Sprintf("fk_%s_%s", constraint.Table, strings.Join(constraint.Columns, "_"))
 		}
 	}
 }

@@ -332,27 +674,31 @@ func (r *Reader) parseIndex(line, tableName, schemaName string) *models.Index {
 	// Format: (columns) [attributes] OR columnname [attributes]
 	var columns []string

-	if strings.Contains(line, "(") && strings.Contains(line, ")") {
+	// Find the attributes section to avoid parsing parentheses in notes/attributes
+	attrStart := strings.Index(line, "[")
+	columnPart := line
+	if attrStart > 0 {
+		columnPart = line[:attrStart]
+	}
+
+	if strings.Contains(columnPart, "(") && strings.Contains(columnPart, ")") {
 		// Multi-column format: (col1, col2) [attributes]
-		colStart := strings.Index(line, "(")
-		colEnd := strings.Index(line, ")")
+		colStart := strings.Index(columnPart, "(")
+		colEnd := strings.Index(columnPart, ")")
 		if colStart >= colEnd {
 			return nil
 		}

-		columnsStr := line[colStart+1 : colEnd]
+		columnsStr := columnPart[colStart+1 : colEnd]
 		for _, col := range strings.Split(columnsStr, ",") {
 			columns = append(columns, stripQuotes(strings.TrimSpace(col)))
 		}
-	} else if strings.Contains(line, "[") {
+	} else if attrStart > 0 {
 		// Single column format: columnname [attributes]
-		// Extract column name before the bracket
-		idx := strings.Index(line, "[")
-		if idx > 0 {
-			colName := strings.TrimSpace(line[:idx])
-			if colName != "" {
-				columns = []string{stripQuotes(colName)}
-			}
+		colName := strings.TrimSpace(columnPart)
+		if colName != "" {
+			columns = []string{stripQuotes(colName)}
 		}
 	}

@@ -391,7 +737,11 @@ func (r *Reader) parseIndex(line, tableName, schemaName string) *models.Index {

 	// Generate name if not provided
 	if index.Name == "" {
-		index.Name = fmt.Sprintf("idx_%s_%s", tableName, strings.Join(columns, "_"))
+		prefix := "idx"
+		if index.Unique {
+			prefix = "uidx"
+		}
+		index.Name = fmt.Sprintf("%s_%s_%s", prefix, tableName, strings.Join(columns, "_"))
 	}

 	return index

@@ -451,10 +801,10 @@ func (r *Reader) parseRef(refStr string) *models.Constraint {
 		return nil
 	}

-	// Generate short constraint name based on the source column
-	constraintName := fmt.Sprintf("fk_%s_%s", fromTable, toTable)
-	if len(fromColumns) > 0 {
-		constraintName = fmt.Sprintf("fk_%s", fromColumns[0])
+	// Generate constraint name based on table and columns
+	constraintName := fmt.Sprintf("fk_%s_%s", fromTable, strings.Join(fromColumns, "_"))
+	if len(fromColumns) == 0 {
+		constraintName = fmt.Sprintf("fk_%s_%s", fromTable, toTable)
 	}

 	constraint := models.InitConstraint(

@@ -510,7 +860,7 @@ func (r *Reader) parseTableRef(ref string) (schema, table string, columns []stri
 	}

 	// Parse schema, table, and optionally column
-	parts := strings.Split(strings.TrimSpace(ref), ".")
+	parts := splitIdentifier(strings.TrimSpace(ref))
 	if len(parts) == 3 {
 		// Format: "schema"."table"."column"
 		schema = stripQuotes(parts[0])
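With directory support in place, pointing the reader at a folder of `.dbml` files merges them into one model. A usage sketch based on the `ReaderOptions` shape that appears in the tests below; the directory path is hypothetical, and the import paths are inferred from the repository layout shown in the diff:

```go
package main

import (
	"fmt"
	"log"

	"git.warky.dev/wdevs/relspecgo/pkg/readers"
	"git.warky.dev/wdevs/relspecgo/pkg/readers/dbml"
)

func main() {
	// FilePath may now be a directory: files such as 1_users.dbml and
	// 2_posts.dbml are loaded in numeric-prefix order, and files containing
	// commented-out Ref statements are merged last.
	reader := dbml.NewReader(&readers.ReaderOptions{
		FilePath: "./schema/dbml",
	})

	db, err := reader.ReadDatabase()
	if err != nil {
		log.Fatal(err)
	}

	for _, schema := range db.Schemas {
		fmt.Printf("schema %s: %d tables\n", schema.Name, len(schema.Tables))
	}
}
```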
@@ -1,6 +1,7 @@
 package dbml

 import (
+	"os"
 	"path/filepath"
 	"testing"

@@ -517,3 +518,356 @@ func TestGetForeignKeys(t *testing.T) {
 		t.Error("Expected foreign key constraint type")
 	}
 }
+
+// Tests for multi-file directory loading
+
+func TestReadDirectory_MultipleFiles(t *testing.T) {
+	opts := &readers.ReaderOptions{
+		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "dbml", "multifile"),
+	}
+
+	reader := NewReader(opts)
+	db, err := reader.ReadDatabase()
+	if err != nil {
+		t.Fatalf("ReadDatabase() error = %v", err)
+	}
+
+	if db == nil {
+		t.Fatal("ReadDatabase() returned nil database")
+	}
+
+	// Should have public schema
+	if len(db.Schemas) == 0 {
+		t.Fatal("Expected at least one schema")
+	}
+
+	var publicSchema *models.Schema
+	for _, schema := range db.Schemas {
+		if schema.Name == "public" {
+			publicSchema = schema
+			break
+		}
+	}
+
+	if publicSchema == nil {
+		t.Fatal("Public schema not found")
+	}
+
+	// Should have 3 tables: users, posts, comments
+	if len(publicSchema.Tables) != 3 {
+		t.Fatalf("Expected 3 tables, got %d", len(publicSchema.Tables))
+	}
+
+	// Find tables
+	var usersTable, postsTable, commentsTable *models.Table
+	for _, table := range publicSchema.Tables {
+		switch table.Name {
+		case "users":
+			usersTable = table
+		case "posts":
+			postsTable = table
+		case "comments":
+			commentsTable = table
+		}
+	}
+
+	if usersTable == nil {
+		t.Fatal("Users table not found")
+	}
+	if postsTable == nil {
+		t.Fatal("Posts table not found")
+	}
+	if commentsTable == nil {
+		t.Fatal("Comments table not found")
+	}
+
+	// Verify users table has merged columns from 1_users.dbml and 3_add_columns.dbml
+	expectedUserColumns := []string{"id", "email", "name", "created_at"}
+	if len(usersTable.Columns) != len(expectedUserColumns) {
+		t.Errorf("Expected %d columns in users table, got %d", len(expectedUserColumns), len(usersTable.Columns))
+	}
+
+	for _, colName := range expectedUserColumns {
+		if _, exists := usersTable.Columns[colName]; !exists {
+			t.Errorf("Expected column '%s' in users table", colName)
+		}
+	}
+
+	// Verify posts table columns
+	expectedPostColumns := []string{"id", "user_id", "title", "content", "created_at"}
+	for _, colName := range expectedPostColumns {
+		if _, exists := postsTable.Columns[colName]; !exists {
+			t.Errorf("Expected column '%s' in posts table", colName)
+		}
+	}
+}
+
+func TestReadDirectory_TableMerging(t *testing.T) {
+	opts := &readers.ReaderOptions{
+		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "dbml", "multifile"),
+	}
+
+	reader := NewReader(opts)
+	db, err := reader.ReadDatabase()
+	if err != nil {
+		t.Fatalf("ReadDatabase() error = %v", err)
+	}
+
+	// Find users table
+	var usersTable *models.Table
+	for _, schema := range db.Schemas {
+		for _, table := range schema.Tables {
+			if table.Name == "users" && schema.Name == "public" {
+				usersTable = table
+				break
+			}
+		}
+	}
+
+	if usersTable == nil {
+		t.Fatal("Users table not found")
+	}
+
+	// Verify columns from file 1 (id, email)
+	if _, exists := usersTable.Columns["id"]; !exists {
+		t.Error("Column 'id' from 1_users.dbml not found")
+	}
+	if _, exists := usersTable.Columns["email"]; !exists {
+		t.Error("Column 'email' from 1_users.dbml not found")
+	}
+
+	// Verify columns from file 3 (name, created_at)
+	if _, exists := usersTable.Columns["name"]; !exists {
+		t.Error("Column 'name' from 3_add_columns.dbml not found")
+	}
+	if _, exists := usersTable.Columns["created_at"]; !exists {
+		t.Error("Column 'created_at' from 3_add_columns.dbml not found")
+	}
+
+	// Verify column properties from file 1
+	emailCol := usersTable.Columns["email"]
+	if !emailCol.NotNull {
+		t.Error("Email column should be not null (from 1_users.dbml)")
+	}
+	if emailCol.Type != "varchar(255)" {
+		t.Errorf("Expected email type 'varchar(255)', got '%s'", emailCol.Type)
+	}
+}
+
+func TestReadDirectory_CommentedRefsLast(t *testing.T) {
+	// This test verifies that files with commented refs are processed last
+	// by checking that the file discovery returns them in the correct order
+	dirPath := filepath.Join("..", "..", "..", "tests", "assets", "dbml", "multifile")
+
+	opts := &readers.ReaderOptions{
+		FilePath: dirPath,
+	}
+
+	reader := NewReader(opts)
+	files, err := reader.discoverDBMLFiles(dirPath)
+	if err != nil {
+		t.Fatalf("discoverDBMLFiles() error = %v", err)
+	}
+
+	if len(files) < 2 {
+		t.Skip("Not enough files to test ordering")
+	}
+
+	// Check that 9_refs.dbml (which has commented refs) comes last
+	lastFile := filepath.Base(files[len(files)-1])
+	if lastFile != "9_refs.dbml" {
+		t.Errorf("Expected last file to be '9_refs.dbml' (has commented refs), got '%s'", lastFile)
+	}
+
+	// Check that numbered files without commented refs come first
+	firstFile := filepath.Base(files[0])
+	if firstFile != "1_users.dbml" {
+		t.Errorf("Expected first file to be '1_users.dbml', got '%s'", firstFile)
+	}
+}
+
+func TestReadDirectory_EmptyDirectory(t *testing.T) {
+	// Create a temporary empty directory
+	tmpDir := filepath.Join("..", "..", "..", "tests", "assets", "dbml", "empty_test_dir")
+	err := os.MkdirAll(tmpDir, 0755)
+	if err != nil {
+		t.Fatalf("Failed to create temp directory: %v", err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	opts := &readers.ReaderOptions{
+		FilePath: tmpDir,
+	}
+
+	reader := NewReader(opts)
+	db, err := reader.ReadDatabase()
+	if err != nil {
+		t.Fatalf("ReadDatabase() should not error on empty directory, got: %v", err)
+	}
+
+	if db == nil {
+		t.Fatal("ReadDatabase() returned nil database")
+	}
+
+	// Empty directory should return empty database
+	if len(db.Schemas) != 0 {
+		t.Errorf("Expected 0 schemas for empty directory, got %d", len(db.Schemas))
+	}
+}
+
+func TestReadDatabase_BackwardCompat(t *testing.T) {
+	// Test that single file loading still works
+	opts := &readers.ReaderOptions{
+		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "dbml", "simple.dbml"),
+	}
+
+	reader := NewReader(opts)
+	db, err := reader.ReadDatabase()
+	if err != nil {
+		t.Fatalf("ReadDatabase() error = %v", err)
+	}
+
+	if db == nil {
+		t.Fatal("ReadDatabase() returned nil database")
+	}
+
+	if len(db.Schemas) == 0 {
+		t.Fatal("Expected at least one schema")
+	}
+
+	schema := db.Schemas[0]
+	if len(schema.Tables) != 1 {
+		t.Fatalf("Expected 1 table, got %d", len(schema.Tables))
+	}
+
+	table := schema.Tables[0]
+	if table.Name != "users" {
+		t.Errorf("Expected table name 'users', got '%s'", table.Name)
+	}
+}
+
+func TestParseFilePrefix(t *testing.T) {
+	tests := []struct {
+		filename   string
+		wantPrefix int
+		wantHas    bool
+	}{
+		{"1_schema.dbml", 1, true},
+		{"2_tables.dbml", 2, true},
+		{"10_relationships.dbml", 10, true},
+		{"99_data.dbml", 99, true},
+		{"schema.dbml", 0, false},
+		{"tables_no_prefix.dbml", 0, false},
+		{"/path/to/1_file.dbml", 1, true},
+		{"/path/to/file.dbml", 0, false},
+		{"1-file.dbml", 1, true},
+		{"2-another.dbml", 2, true},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.filename, func(t *testing.T) {
+			gotPrefix, gotHas := parseFilePrefix(tt.filename)
+			if gotPrefix != tt.wantPrefix {
+				t.Errorf("parseFilePrefix(%s) prefix = %d, want %d", tt.filename, gotPrefix, tt.wantPrefix)
+			}
+			if gotHas != tt.wantHas {
+				t.Errorf("parseFilePrefix(%s) hasPrefix = %v, want %v", tt.filename, gotHas, tt.wantHas)
+			}
+		})
+	}
+}
+
+func TestConstraintNaming(t *testing.T) {
+	// Test that constraints are named with proper prefixes
+	opts := &readers.ReaderOptions{
+		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "dbml", "complex.dbml"),
+	}
+
+	reader := NewReader(opts)
+	db, err := reader.ReadDatabase()
+	if err != nil {
+		t.Fatalf("ReadDatabase() error = %v", err)
+	}
+
+	// Find users and posts tables
+	var usersTable *models.Table
+	var postsTable *models.Table
+	for _, schema := range db.Schemas {
+		for _, table := range schema.Tables {
+			if table.Name == "users" {
+				usersTable = table
+			} else if table.Name == "posts" {
+				postsTable = table
+			}
+		}
+	}
+
+	if usersTable == nil {
+		t.Fatal("Users table not found")
+	}
+	if postsTable == nil {
+		t.Fatal("Posts table not found")
+	}
+
+	// Test unique constraint naming: uq_table_column
+	if _, exists := usersTable.Constraints["uq_users_email"]; !exists {
+		t.Error("Expected unique constraint 'uq_users_email' not found")
+		t.Logf("Available constraints: %v", getKeys(usersTable.Constraints))
+	}
+
+	if _, exists := postsTable.Constraints["uq_posts_slug"]; !exists {
+		t.Error("Expected unique constraint 'uq_posts_slug' not found")
+		t.Logf("Available constraints: %v", getKeys(postsTable.Constraints))
+	}
+
+	// Test foreign key naming: fk_table_column
+	if _, exists := postsTable.Constraints["fk_posts_user_id"]; !exists {
+		t.Error("Expected foreign key 'fk_posts_user_id' not found")
+		t.Logf("Available constraints: %v", getKeys(postsTable.Constraints))
+	}
+
+	// Test unique index naming: uidx_table_columns
+	if _, exists := postsTable.Indexes["uidx_posts_slug"]; !exists {
+		t.Error("Expected unique index 'uidx_posts_slug' not found")
+		t.Logf("Available indexes: %v", getKeys(postsTable.Indexes))
+	}
+
+	// Test regular index naming: idx_table_columns
+	if _, exists := postsTable.Indexes["idx_posts_user_id_published"]; !exists {
+		t.Error("Expected index 'idx_posts_user_id_published' not found")
+		t.Logf("Available indexes: %v", getKeys(postsTable.Indexes))
+	}
+}
+
+func getKeys[V any](m map[string]V) []string {
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+func TestHasCommentedRefs(t *testing.T) {
+	// Test with the actual multifile test fixtures
+	tests := []struct {
+		filename string
+		wantHas  bool
+	}{
+		{filepath.Join("..", "..", "..", "tests", "assets", "dbml", "multifile", "1_users.dbml"), false},
+		{filepath.Join("..", "..", "..", "tests", "assets", "dbml", "multifile", "2_posts.dbml"), false},
+		{filepath.Join("..", "..", "..", "tests", "assets", "dbml", "multifile", "3_add_columns.dbml"), false},
+		{filepath.Join("..", "..", "..", "tests", "assets", "dbml", "multifile", "9_refs.dbml"), true},
+	}
+
+	for _, tt := range tests {
+		t.Run(filepath.Base(tt.filename), func(t *testing.T) {
+			gotHas, err := hasCommentedRefs(tt.filename)
+			if err != nil {
+				t.Fatalf("hasCommentedRefs() error = %v", err)
+			}
+			if gotHas != tt.wantHas {
+				t.Errorf("hasCommentedRefs(%s) = %v, want %v", filepath.Base(tt.filename), gotHas, tt.wantHas)
+			}
+		})
+	}
+}
@@ -329,10 +329,10 @@ func (r *Reader) deriveRelationship(table *models.Table, fk *models.Constraint)
 	relationshipName := fmt.Sprintf("%s_to_%s", table.Name, fk.ReferencedTable)

 	relationship := models.InitRelationship(relationshipName, models.OneToMany)
-	relationship.FromTable = fk.ReferencedTable
-	relationship.FromSchema = fk.ReferencedSchema
-	relationship.ToTable = table.Name
-	relationship.ToSchema = table.Schema
+	relationship.FromTable = table.Name
+	relationship.FromSchema = table.Schema
+	relationship.ToTable = fk.ReferencedTable
+	relationship.ToSchema = fk.ReferencedSchema
 	relationship.ForeignKey = fk.Name

 	// Store constraint actions in properties

@@ -328,12 +328,12 @@ func TestDeriveRelationship(t *testing.T) {
 		t.Errorf("Expected relationship type %s, got %s", models.OneToMany, rel.Type)
 	}

-	if rel.FromTable != "users" {
-		t.Errorf("Expected FromTable 'users', got '%s'", rel.FromTable)
+	if rel.FromTable != "orders" {
+		t.Errorf("Expected FromTable 'orders', got '%s'", rel.FromTable)
 	}

-	if rel.ToTable != "orders" {
-		t.Errorf("Expected ToTable 'orders', got '%s'", rel.ToTable)
+	if rel.ToTable != "users" {
+		t.Errorf("Expected ToTable 'users', got '%s'", rel.ToTable)
 	}

 	if rel.ForeignKey != "fk_orders_user_id" {
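The direction fix above makes the derived relationship read from the foreign-key side: an `orders.user_id` FK now yields FromTable `orders` and ToTable `users`, matching the updated test. A toy illustration of the corrected assignment (struct shapes simplified from the models package):

```go
package main

import "fmt"

// constraint and relationship are simplified stand-ins for the models types.
type constraint struct {
	Name             string
	ReferencedTable  string
	ReferencedSchema string
}

type relationship struct {
	FromTable, FromSchema string
	ToTable, ToSchema     string
	ForeignKey            string
}

// derive mirrors the corrected logic: the table owning the FK is the From side.
func derive(tableName, tableSchema string, fk constraint) relationship {
	return relationship{
		FromTable:  tableName,
		FromSchema: tableSchema,
		ToTable:    fk.ReferencedTable,
		ToSchema:   fk.ReferencedSchema,
		ForeignKey: fk.Name,
	}
}

func main() {
	rel := derive("orders", "public", constraint{
		Name:             "fk_orders_user_id",
		ReferencedTable:  "users",
		ReferencedSchema: "public",
	})
	fmt.Printf("%s -> %s via %s\n", rel.FromTable, rel.ToTable, rel.ForeignKey)
	// Output: orders -> users via fk_orders_user_id
}
```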
@@ -93,6 +93,7 @@ fmt.Printf("Found %d scripts\n", len(schema.Scripts))
 ## Features

 - **Recursive Directory Scanning**: Automatically scans all subdirectories
+- **Symlink Skipping**: Symbolic links are automatically skipped (prevents loops and duplicates)
 - **Multiple Extensions**: Supports both `.sql` and `.pgsql` files
 - **Flexible Naming**: Extract metadata from filename patterns
 - **Error Handling**: Validates directory existence and file accessibility

@@ -153,8 +154,9 @@ go test ./pkg/readers/sqldir/
 ```

 Tests include:
-- Valid file parsing
+- Valid file parsing (underscore and hyphen formats)
 - Recursive directory scanning
+- Symlink skipping
 - Invalid filename handling
 - Empty directory handling
 - Error conditions
@@ -107,11 +107,20 @@ func (r *Reader) readScripts() ([]*models.Script, error) {
 			return err
 		}

-		// Skip directories
+		// Don't process directories as files (WalkDir still descends into them recursively)
 		if d.IsDir() {
 			return nil
 		}

+		// Skip symlinks
+		info, err := d.Info()
+		if err != nil {
+			return err
+		}
+		if info.Mode()&os.ModeSymlink != 0 {
+			return nil
+		}
+
 		// Get filename
 		filename := d.Name()
@@ -373,3 +373,65 @@ func TestReader_MixedFormat(t *testing.T) {
 		}
 	}
 }
+
+func TestReader_SkipSymlinks(t *testing.T) {
+	// Create temporary test directory
+	tempDir, err := os.MkdirTemp("", "sqldir-test-symlink-*")
+	if err != nil {
+		t.Fatalf("Failed to create temp directory: %v", err)
+	}
+	defer os.RemoveAll(tempDir)
+
+	// Create a real SQL file
+	realFile := filepath.Join(tempDir, "1_001_real_file.sql")
+	if err := os.WriteFile(realFile, []byte("SELECT 1;"), 0644); err != nil {
+		t.Fatalf("Failed to create real file: %v", err)
+	}
+
+	// Create another file to link to
+	targetFile := filepath.Join(tempDir, "2_001_target.sql")
+	if err := os.WriteFile(targetFile, []byte("SELECT 2;"), 0644); err != nil {
+		t.Fatalf("Failed to create target file: %v", err)
+	}
+
+	// Create a symlink to the target file (this should be skipped)
+	symlinkFile := filepath.Join(tempDir, "3_001_symlink.sql")
+	if err := os.Symlink(targetFile, symlinkFile); err != nil {
+		// Skip test on systems that don't support symlinks (e.g., Windows without admin)
+		t.Skipf("Symlink creation not supported: %v", err)
+	}
+
+	// Create reader
+	reader := NewReader(&readers.ReaderOptions{
+		FilePath: tempDir,
+	})
+
+	// Read database
+	db, err := reader.ReadDatabase()
+	if err != nil {
+		t.Fatalf("ReadDatabase failed: %v", err)
+	}
+
+	schema := db.Schemas[0]
+
+	// Should only have 2 scripts (real_file and target), symlink should be skipped
+	if len(schema.Scripts) != 2 {
+		t.Errorf("Expected 2 scripts (symlink should be skipped), got %d", len(schema.Scripts))
+	}
+
+	// Verify the scripts are the real files, not the symlink
+	scriptNames := make(map[string]bool)
+	for _, script := range schema.Scripts {
+		scriptNames[script.Name] = true
+	}
+
+	if !scriptNames["real_file"] {
+		t.Error("Expected 'real_file' script to be present")
+	}
+	if !scriptNames["target"] {
+		t.Error("Expected 'target' script to be present")
+	}
+	if scriptNames["symlink"] {
+		t.Error("Symlink script should have been skipped but was found")
+	}
+}
@@ -5,6 +5,7 @@ import (
 	"strings"

 	"git.warky.dev/wdevs/relspecgo/pkg/models"
+	"git.warky.dev/wdevs/relspecgo/pkg/writers"
 )

 // TemplateData represents the data passed to the template for code generation

@@ -111,13 +112,17 @@ func NewModelData(table *models.Table, schema string, typeMapper *TypeMapper) *M
 		tableName = schema + "." + table.Name
 	}

-	// Generate model name: singularize and convert to PascalCase
+	// Generate model name: Model + Schema + Table (all PascalCase)
 	singularTable := Singularize(table.Name)
-	modelName := SnakeCaseToPascalCase(singularTable)
+	tablePart := SnakeCaseToPascalCase(singularTable)

-	// Add "Model" prefix if not already present
-	if !hasModelPrefix(modelName) {
-		modelName = "Model" + modelName
+	// Include schema name in model name
+	var modelName string
+	if schema != "" {
+		schemaPart := SnakeCaseToPascalCase(schema)
+		modelName = "Model" + schemaPart + tablePart
+	} else {
+		modelName = "Model" + tablePart
 	}

 	model := &ModelData{

@@ -133,8 +138,10 @@ func NewModelData(table *models.Table, schema string, typeMapper *TypeMapper) *M
 	// Find primary key
 	for _, col := range table.Columns {
 		if col.IsPrimaryKey {
-			model.PrimaryKeyField = SnakeCaseToPascalCase(col.Name)
-			model.IDColumnName = col.Name
+			// Sanitize column name to remove backticks
+			safeName := writers.SanitizeStructTagValue(col.Name)
+			model.PrimaryKeyField = SnakeCaseToPascalCase(safeName)
+			model.IDColumnName = safeName
 			// Check if PK type is a SQL type (contains resolvespec_common or sql_types)
 			goType := typeMapper.SQLTypeToGoType(col.Type, col.NotNull)
 			model.PrimaryKeyIsSQL = strings.Contains(goType, "resolvespec_common") || strings.Contains(goType, "sql_types")

@@ -146,6 +153,8 @@ func NewModelData(table *models.Table, schema string, typeMapper *TypeMapper) *M
 	columns := sortColumns(table.Columns)
 	for _, col := range columns {
 		field := columnToField(col, table, typeMapper)
+		// Check for name collision with generated methods and rename if needed
+		field.Name = resolveFieldNameCollision(field.Name)
 		model.Fields = append(model.Fields, field)
 	}

@@ -154,10 +163,13 @@ func NewModelData(table *models.Table, schema string, typeMapper *TypeMapper) *M

 // columnToField converts a models.Column to FieldData
 func columnToField(col *models.Column, table *models.Table, typeMapper *TypeMapper) *FieldData {
-	fieldName := SnakeCaseToPascalCase(col.Name)
+	// Sanitize column name first to remove backticks before generating field name
+	safeName := writers.SanitizeStructTagValue(col.Name)
+	fieldName := SnakeCaseToPascalCase(safeName)
 	goType := typeMapper.SQLTypeToGoType(col.Type, col.NotNull)
 	bunTag := typeMapper.BuildBunTag(col, table)
-	jsonTag := col.Name // Use column name for JSON tag
+	// Use same sanitized name for JSON tag
+	jsonTag := safeName

 	return &FieldData{
 		Name: fieldName,

@@ -184,9 +196,28 @@ func formatComment(description, comment string) string {
 	return comment
 }

-// hasModelPrefix checks if a name already has "Model" prefix
-func hasModelPrefix(name string) bool {
-	return len(name) >= 5 && name[:5] == "Model"
+// resolveFieldNameCollision checks if a field name conflicts with generated method names
+// and adds an underscore suffix if there's a collision
+func resolveFieldNameCollision(fieldName string) string {
+	// List of method names that are generated by the template
+	reservedNames := map[string]bool{
+		"TableName":     true,
+		"TableNameOnly": true,
+		"SchemaName":    true,
+		"GetID":         true,
+		"GetIDStr":      true,
+		"SetID":         true,
+		"UpdateID":      true,
+		"GetIDName":     true,
+		"GetPrefix":     true,
+	}
+
+	// Check if field name conflicts with a reserved method name
+	if reservedNames[fieldName] {
+		return fieldName + "_"
+	}
+
+	return fieldName
 }

 // sortColumns sorts columns by sequence, then by name
@@ -5,6 +5,7 @@ import (
 	"strings"

 	"git.warky.dev/wdevs/relspecgo/pkg/models"
+	"git.warky.dev/wdevs/relspecgo/pkg/writers"
 )

 // TypeMapper handles type conversions between SQL and Go types for Bun

@@ -164,11 +165,14 @@ func (tm *TypeMapper) BuildBunTag(column *models.Column, table *models.Table) st
 	var parts []string

 	// Column name comes first (no prefix)
-	parts = append(parts, column.Name)
+	// Sanitize to remove backticks which would break struct tag syntax
+	safeName := writers.SanitizeStructTagValue(column.Name)
+	parts = append(parts, safeName)

 	// Add type if specified
 	if column.Type != "" {
-		typeStr := column.Type
+		// Sanitize type to remove backticks
+		typeStr := writers.SanitizeStructTagValue(column.Type)
 		if column.Length > 0 {
 			typeStr = fmt.Sprintf("%s(%d)", typeStr, column.Length)
 		} else if column.Precision > 0 {

@@ -188,12 +192,17 @@ func (tm *TypeMapper) BuildBunTag(column *models.Column, table *models.Table) st

 	// Default value
 	if column.Default != nil {
-		parts = append(parts, fmt.Sprintf("default:%v", column.Default))
+		// Sanitize default value to remove backticks
+		safeDefault := writers.SanitizeStructTagValue(fmt.Sprintf("%v", column.Default))
+		parts = append(parts, fmt.Sprintf("default:%s", safeDefault))
 	}

 	// Nullable (Bun uses nullzero for nullable fields)
+	// and notnull tag for explicitly non-nullable fields
 	if !column.NotNull && !column.IsPrimaryKey {
 		parts = append(parts, "nullzero")
+	} else if column.NotNull && !column.IsPrimaryKey {
+		parts = append(parts, "notnull")
 	}

 	// Check for indexes (unique indexes should be added to tag)

@@ -260,7 +269,7 @@ func (tm *TypeMapper) NeedsFmtImport(generateGetIDStr bool) bool {

 // GetSQLTypesImport returns the import path for sql_types (ResolveSpec common)
 func (tm *TypeMapper) GetSQLTypesImport() string {
-	return "github.com/bitechdev/ResolveSpec/pkg/common"
+	return "github.com/bitechdev/ResolveSpec/pkg/spectypes"
 }

 // GetBunImport returns the import path for Bun
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
"go/format"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
@@ -124,7 +125,16 @@ func (w *Writer) writeSingleFile(db *models.Database) error {
|
||||
}
|
||||
|
||||
// Write output
|
||||
return w.writeOutput(formatted)
|
||||
if err := w.writeOutput(formatted); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Run go fmt on the output file
|
||||
if w.options.OutputPath != "" {
|
||||
w.runGoFmt(w.options.OutputPath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeMultiFile writes each table to a separate file
|
||||
@@ -207,13 +217,19 @@ func (w *Writer) writeMultiFile(db *models.Database) error {
|
||||
}
|
||||
|
||||
// Generate filename: sql_{schema}_{table}.go
|
||||
filename := fmt.Sprintf("sql_%s_%s.go", schema.Name, table.Name)
|
||||
// Sanitize schema and table names to remove quotes, comments, and invalid characters
|
||||
safeSchemaName := writers.SanitizeFilename(schema.Name)
|
||||
safeTableName := writers.SanitizeFilename(table.Name)
|
||||
filename := fmt.Sprintf("sql_%s_%s.go", safeSchemaName, safeTableName)
|
||||
filepath := filepath.Join(w.options.OutputPath, filename)
|
||||
|
||||
// Write file
|
||||
if err := os.WriteFile(filepath, []byte(formatted), 0644); err != nil {
|
||||
return fmt.Errorf("failed to write file %s: %w", filename, err)
|
||||
}
|
||||
|
||||
// Run go fmt on the generated file
|
||||
w.runGoFmt(filepath)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -222,6 +238,9 @@ func (w *Writer) writeMultiFile(db *models.Database) error {
|
||||
|
||||
// addRelationshipFields adds relationship fields to the model based on foreign keys
|
||||
func (w *Writer) addRelationshipFields(modelData *ModelData, table *models.Table, schema *models.Schema, db *models.Database) {
|
||||
// Track used field names to detect duplicates
|
||||
usedFieldNames := make(map[string]int)
|
||||
|
||||
// For each foreign key in this table, add a belongs-to/has-one relationship
|
||||
for _, constraint := range table.Constraints {
|
||||
if constraint.Type != models.ForeignKeyConstraint {
|
||||
@@ -235,8 +254,9 @@ func (w *Writer) addRelationshipFields(modelData *ModelData, table *models.Table
|
||||
}
|
||||
|
||||
// Create relationship field (has-one in Bun, similar to belongs-to in GORM)
|
||||
refModelName := w.getModelName(constraint.ReferencedTable)
|
||||
fieldName := w.generateRelationshipFieldName(constraint.ReferencedTable)
|
||||
refModelName := w.getModelName(constraint.ReferencedSchema, constraint.ReferencedTable)
|
||||
fieldName := w.generateHasOneFieldName(constraint)
|
||||
fieldName = w.ensureUniqueFieldName(fieldName, usedFieldNames)
|
||||
relationTag := w.typeMapper.BuildRelationshipTag(constraint, "has-one")
|
||||
|
||||
modelData.AddRelationshipField(&FieldData{
|
||||
@@ -263,8 +283,9 @@ func (w *Writer) addRelationshipFields(modelData *ModelData, table *models.Table
|
||||
// Check if this constraint references our table
|
||||
if constraint.ReferencedTable == table.Name && constraint.ReferencedSchema == schema.Name {
|
||||
// Add has-many relationship
|
||||
otherModelName := w.getModelName(otherTable.Name)
|
||||
fieldName := w.generateRelationshipFieldName(otherTable.Name) + "s" // Pluralize
|
||||
otherModelName := w.getModelName(otherSchema.Name, otherTable.Name)
|
||||
fieldName := w.generateHasManyFieldName(constraint, otherSchema.Name, otherTable.Name)
|
||||
fieldName = w.ensureUniqueFieldName(fieldName, usedFieldNames)
|
||||
relationTag := w.typeMapper.BuildRelationshipTag(constraint, "has-many")
|
||||
|
||||
modelData.AddRelationshipField(&FieldData{
|
||||
@@ -295,22 +316,77 @@ func (w *Writer) findTable(schemaName, tableName string, db *models.Database) *m
|
||||
return nil
|
||||
}
|
||||
|
||||
// getModelName generates the model name from a table name
|
||||
func (w *Writer) getModelName(tableName string) string {
|
||||
// getModelName generates the model name from schema and table name
|
||||
func (w *Writer) getModelName(schemaName, tableName string) string {
|
||||
singular := Singularize(tableName)
|
||||
modelName := SnakeCaseToPascalCase(singular)
|
||||
tablePart := SnakeCaseToPascalCase(singular)
|
||||
|
||||
if !hasModelPrefix(modelName) {
|
||||
modelName = "Model" + modelName
|
||||
// Include schema name in model name
|
||||
var modelName string
|
||||
if schemaName != "" {
|
||||
schemaPart := SnakeCaseToPascalCase(schemaName)
|
||||
modelName = "Model" + schemaPart + tablePart
|
||||
} else {
|
||||
modelName = "Model" + tablePart
|
||||
}
|
||||
|
||||
return modelName
|
||||
}
|
||||
|
||||
// generateRelationshipFieldName generates a field name for a relationship
|
||||
func (w *Writer) generateRelationshipFieldName(tableName string) string {
|
||||
// Use just the prefix (3 letters) for relationship fields
|
||||
return GeneratePrefix(tableName)
|
||||
// generateHasOneFieldName generates a field name for has-one relationships
|
||||
// Uses the foreign key column name for uniqueness
|
||||
func (w *Writer) generateHasOneFieldName(constraint *models.Constraint) string {
|
||||
// Use the foreign key column name to ensure uniqueness
|
||||
// If there are multiple columns, use the first one
|
||||
if len(constraint.Columns) > 0 {
|
||||
columnName := constraint.Columns[0]
|
||||
// Convert to PascalCase for proper Go field naming
|
||||
// e.g., "rid_filepointer_request" -> "RelRIDFilepointerRequest"
|
||||
return "Rel" + SnakeCaseToPascalCase(columnName)
|
||||
}
|
||||
|
||||
// Fallback to table-based prefix if no columns defined
|
||||
return "Rel" + GeneratePrefix(constraint.ReferencedTable)
|
||||
}
|
||||
|
||||
// generateHasManyFieldName generates a field name for has-many relationships
|
||||
// Uses the foreign key column name + source table name to avoid duplicates
|
||||
func (w *Writer) generateHasManyFieldName(constraint *models.Constraint, sourceSchemaName, sourceTableName string) string {
|
||||
// For has-many, we need to include the source table name to avoid duplicates
|
||||
// e.g., multiple tables referencing the same column on this table
|
||||
if len(constraint.Columns) > 0 {
|
||||
columnName := constraint.Columns[0]
|
||||
// Get the model name for the source table (pluralized)
|
||||
sourceModelName := w.getModelName(sourceSchemaName, sourceTableName)
|
||||
// Remove "Model" prefix if present
|
||||
sourceModelName = strings.TrimPrefix(sourceModelName, "Model")
|
||||
|
||||
// Convert column to PascalCase and combine with source table
|
||||
// e.g., "rid_api_provider" + "Login" -> "RelRIDAPIProviderLogins"
|
||||
columnPart := SnakeCaseToPascalCase(columnName)
|
||||
return "Rel" + columnPart + Pluralize(sourceModelName)
|
||||
}
|
||||
|
||||
// Fallback to table-based naming
|
||||
sourceModelName := w.getModelName(sourceSchemaName, sourceTableName)
|
||||
sourceModelName = strings.TrimPrefix(sourceModelName, "Model")
|
||||
return "Rel" + Pluralize(sourceModelName)
|
||||
}
|
||||
|
||||
// ensureUniqueFieldName ensures a field name is unique by adding numeric suffixes if needed
|
||||
func (w *Writer) ensureUniqueFieldName(fieldName string, usedNames map[string]int) string {
|
||||
originalName := fieldName
|
||||
count := usedNames[originalName]
|
||||
|
||||
if count > 0 {
|
||||
// Name is already used, add numeric suffix
|
||||
fieldName = fmt.Sprintf("%s%d", originalName, count+1)
|
||||
}
|
||||
|
||||
// Increment the counter for this base name
|
||||
usedNames[originalName]++
|
||||
|
||||
return fieldName
|
||||
}
|
||||
|
||||
// getPackageName returns the package name from options or defaults to "models"
|
||||
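A short usage sketch of the suffixing behavior above, with the helper reproduced standalone: the counter is keyed by the base name, so the first occurrence keeps its name and later occurrences get numeric suffixes starting at 2.

```go
package main

import "fmt"

// ensureUnique mirrors ensureUniqueFieldName: record how many times a base
// name has been requested and suffix every request after the first.
func ensureUnique(fieldName string, used map[string]int) string {
	base := fieldName
	if count := used[base]; count > 0 {
		fieldName = fmt.Sprintf("%s%d", base, count+1)
	}
	used[base]++
	return fieldName
}

func main() {
	used := map[string]int{}
	fmt.Println(ensureUnique("RelUserID", used)) // RelUserID
	fmt.Println(ensureUnique("RelUserID", used)) // RelUserID2
	fmt.Println(ensureUnique("RelUserID", used)) // RelUserID3
}
```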
@@ -341,6 +417,15 @@ func (w *Writer) writeOutput(content string) error {
return nil
}

// runGoFmt runs go fmt on the specified file
func (w *Writer) runGoFmt(filepath string) {
cmd := exec.Command("gofmt", "-w", filepath)
if err := cmd.Run(); err != nil {
// Don't fail the whole operation if gofmt fails, just warn
fmt.Fprintf(os.Stderr, "Warning: failed to run gofmt on %s: %v\n", filepath, err)
}
}

// shouldUseMultiFile determines whether to use multi-file mode based on metadata or output path
func (w *Writer) shouldUseMultiFile() bool {
// Check if multi_file is explicitly set in metadata

@@ -66,7 +66,7 @@ func TestWriter_WriteTable(t *testing.T) {
// Verify key elements are present
expectations := []string{
"package models",
"type ModelUser struct",
"type ModelPublicUser struct",
"bun.BaseModel",
"table:public.users",
"alias:users",
@@ -78,9 +78,9 @@ func TestWriter_WriteTable(t *testing.T) {
"resolvespec_common.SqlTime",
"bun:\"id",
"bun:\"email",
"func (m ModelUser) TableName() string",
"func (m ModelPublicUser) TableName() string",
"return \"public.users\"",
"func (m ModelUser) GetID() int64",
"func (m ModelPublicUser) GetID() int64",
}

for _, expected := range expectations {
@@ -175,12 +175,378 @@ func TestWriter_WriteDatabase_MultiFile(t *testing.T) {
postsStr := string(postsContent)

// Verify relationship is present with Bun format
if !strings.Contains(postsStr, "USE") {
t.Errorf("Missing relationship field USE")
// Should now be RelUserID (has-one) instead of USE
if !strings.Contains(postsStr, "RelUserID") {
t.Errorf("Missing relationship field RelUserID (new naming convention)")
}
if !strings.Contains(postsStr, "rel:has-one") {
t.Errorf("Missing Bun relationship tag: %s", postsStr)
}

// Check users file contains has-many relationship
usersContent, err := os.ReadFile(filepath.Join(tmpDir, "sql_public_users.go"))
if err != nil {
t.Fatalf("Failed to read users file: %v", err)
}

usersStr := string(usersContent)

// Should have RelUserIDPublicPosts (has-many) field - includes schema prefix
if !strings.Contains(usersStr, "RelUserIDPublicPosts") {
t.Errorf("Missing has-many relationship field RelUserIDPublicPosts")
}
}

func TestWriter_MultipleReferencesToSameTable(t *testing.T) {
// Test scenario: api_event table with multiple foreign keys to filepointer table
db := models.InitDatabase("testdb")
schema := models.InitSchema("org")

// Filepointer table
filepointer := models.InitTable("filepointer", "org")
filepointer.Columns["id_filepointer"] = &models.Column{
Name: "id_filepointer",
Type: "bigserial",
NotNull: true,
IsPrimaryKey: true,
}
schema.Tables = append(schema.Tables, filepointer)

// API event table with two foreign keys to filepointer
apiEvent := models.InitTable("api_event", "org")
apiEvent.Columns["id_api_event"] = &models.Column{
Name: "id_api_event",
Type: "bigserial",
NotNull: true,
IsPrimaryKey: true,
}
apiEvent.Columns["rid_filepointer_request"] = &models.Column{
Name: "rid_filepointer_request",
Type: "bigint",
NotNull: false,
}
apiEvent.Columns["rid_filepointer_response"] = &models.Column{
Name: "rid_filepointer_response",
Type: "bigint",
NotNull: false,
}

// Add constraints
apiEvent.Constraints["fk_request"] = &models.Constraint{
Name: "fk_request",
Type: models.ForeignKeyConstraint,
Columns: []string{"rid_filepointer_request"},
ReferencedTable: "filepointer",
ReferencedSchema: "org",
ReferencedColumns: []string{"id_filepointer"},
}
apiEvent.Constraints["fk_response"] = &models.Constraint{
Name: "fk_response",
Type: models.ForeignKeyConstraint,
Columns: []string{"rid_filepointer_response"},
ReferencedTable: "filepointer",
ReferencedSchema: "org",
ReferencedColumns: []string{"id_filepointer"},
}

schema.Tables = append(schema.Tables, apiEvent)
db.Schemas = append(db.Schemas, schema)

// Create writer
tmpDir := t.TempDir()
opts := &writers.WriterOptions{
PackageName: "models",
OutputPath: tmpDir,
Metadata: map[string]interface{}{
"multi_file": true,
},
}

writer := NewWriter(opts)
err := writer.WriteDatabase(db)
if err != nil {
t.Fatalf("WriteDatabase failed: %v", err)
}

// Read the api_event file
apiEventContent, err := os.ReadFile(filepath.Join(tmpDir, "sql_org_api_event.go"))
if err != nil {
t.Fatalf("Failed to read api_event file: %v", err)
}

contentStr := string(apiEventContent)

// Verify both relationships have unique names based on column names
expectations := []struct {
fieldName string
tag string
}{
{"RelRIDFilepointerRequest", "join:rid_filepointer_request=id_filepointer"},
{"RelRIDFilepointerResponse", "join:rid_filepointer_response=id_filepointer"},
}

for _, exp := range expectations {
if !strings.Contains(contentStr, exp.fieldName) {
t.Errorf("Missing relationship field: %s\nGenerated:\n%s", exp.fieldName, contentStr)
}
if !strings.Contains(contentStr, exp.tag) {
t.Errorf("Missing relationship tag: %s\nGenerated:\n%s", exp.tag, contentStr)
}
}

// Verify NO duplicate field names (old behavior would create duplicate "FIL" fields)
if strings.Contains(contentStr, "FIL *ModelFilepointer") {
t.Errorf("Found old prefix-based naming (FIL), should use column-based naming")
}

// Also verify has-many relationships on filepointer table
filepointerContent, err := os.ReadFile(filepath.Join(tmpDir, "sql_org_filepointer.go"))
if err != nil {
t.Fatalf("Failed to read filepointer file: %v", err)
}

filepointerStr := string(filepointerContent)

// Should have two different has-many relationships with unique names
hasManyExpectations := []string{
"RelRIDFilepointerRequestOrgAPIEvents", // Has many via rid_filepointer_request
"RelRIDFilepointerResponseOrgAPIEvents", // Has many via rid_filepointer_response
}

for _, exp := range hasManyExpectations {
if !strings.Contains(filepointerStr, exp) {
t.Errorf("Missing has-many relationship field: %s\nGenerated:\n%s", exp, filepointerStr)
}
}
}

func TestWriter_MultipleHasManyRelationships(t *testing.T) {
// Test scenario: api_provider table referenced by multiple tables via rid_api_provider
db := models.InitDatabase("testdb")
schema := models.InitSchema("org")

// Owner table
owner := models.InitTable("owner", "org")
owner.Columns["id_owner"] = &models.Column{
Name: "id_owner",
Type: "bigserial",
NotNull: true,
IsPrimaryKey: true,
}
schema.Tables = append(schema.Tables, owner)

// API Provider table
apiProvider := models.InitTable("api_provider", "org")
apiProvider.Columns["id_api_provider"] = &models.Column{
Name: "id_api_provider",
Type: "bigserial",
NotNull: true,
IsPrimaryKey: true,
}
apiProvider.Columns["rid_owner"] = &models.Column{
Name: "rid_owner",
Type: "bigint",
NotNull: true,
}
apiProvider.Constraints["fk_owner"] = &models.Constraint{
Name: "fk_owner",
Type: models.ForeignKeyConstraint,
Columns: []string{"rid_owner"},
ReferencedTable: "owner",
ReferencedSchema: "org",
ReferencedColumns: []string{"id_owner"},
}
schema.Tables = append(schema.Tables, apiProvider)

// Login table
login := models.InitTable("login", "org")
login.Columns["id_login"] = &models.Column{
Name: "id_login",
Type: "bigserial",
NotNull: true,
IsPrimaryKey: true,
}
login.Columns["rid_api_provider"] = &models.Column{
Name: "rid_api_provider",
Type: "bigint",
NotNull: true,
}
login.Constraints["fk_api_provider"] = &models.Constraint{
Name: "fk_api_provider",
Type: models.ForeignKeyConstraint,
Columns: []string{"rid_api_provider"},
ReferencedTable: "api_provider",
ReferencedSchema: "org",
ReferencedColumns: []string{"id_api_provider"},
}
schema.Tables = append(schema.Tables, login)

// Filepointer table
filepointer := models.InitTable("filepointer", "org")
filepointer.Columns["id_filepointer"] = &models.Column{
Name: "id_filepointer",
Type: "bigserial",
NotNull: true,
IsPrimaryKey: true,
}
filepointer.Columns["rid_api_provider"] = &models.Column{
Name: "rid_api_provider",
Type: "bigint",
NotNull: true,
}
filepointer.Constraints["fk_api_provider"] = &models.Constraint{
Name: "fk_api_provider",
Type: models.ForeignKeyConstraint,
Columns: []string{"rid_api_provider"},
ReferencedTable: "api_provider",
ReferencedSchema: "org",
ReferencedColumns: []string{"id_api_provider"},
}
schema.Tables = append(schema.Tables, filepointer)

// API Event table
apiEvent := models.InitTable("api_event", "org")
apiEvent.Columns["id_api_event"] = &models.Column{
Name: "id_api_event",
Type: "bigserial",
NotNull: true,
IsPrimaryKey: true,
}
apiEvent.Columns["rid_api_provider"] = &models.Column{
Name: "rid_api_provider",
Type: "bigint",
NotNull: true,
}
apiEvent.Constraints["fk_api_provider"] = &models.Constraint{
Name: "fk_api_provider",
Type: models.ForeignKeyConstraint,
Columns: []string{"rid_api_provider"},
ReferencedTable: "api_provider",
ReferencedSchema: "org",
ReferencedColumns: []string{"id_api_provider"},
}
schema.Tables = append(schema.Tables, apiEvent)

db.Schemas = append(db.Schemas, schema)

// Create writer
tmpDir := t.TempDir()
opts := &writers.WriterOptions{
PackageName: "models",
OutputPath: tmpDir,
Metadata: map[string]interface{}{
"multi_file": true,
},
}

writer := NewWriter(opts)
err := writer.WriteDatabase(db)
if err != nil {
t.Fatalf("WriteDatabase failed: %v", err)
}

// Read the api_provider file
apiProviderContent, err := os.ReadFile(filepath.Join(tmpDir, "sql_org_api_provider.go"))
if err != nil {
t.Fatalf("Failed to read api_provider file: %v", err)
}

contentStr := string(apiProviderContent)

// Verify all has-many relationships have unique names
hasManyExpectations := []string{
"RelRIDAPIProviderOrgLogins", // Has many via Login
"RelRIDAPIProviderOrgFilepointers", // Has many via Filepointer
"RelRIDAPIProviderOrgAPIEvents", // Has many via APIEvent
"RelRIDOwner", // Has one via rid_owner
}

for _, exp := range hasManyExpectations {
if !strings.Contains(contentStr, exp) {
t.Errorf("Missing relationship field: %s\nGenerated:\n%s", exp, contentStr)
}
}

// Verify NO duplicate field names
// Count occurrences of "RelRIDAPIProvider" fields - should have 3 unique ones
count := strings.Count(contentStr, "RelRIDAPIProvider")
if count != 3 {
t.Errorf("Expected 3 RelRIDAPIProvider* fields, found %d\nGenerated:\n%s", count, contentStr)
}

// Verify no duplicate declarations (would cause compilation error)
duplicatePattern := "RelRIDAPIProviders []*Model"
if strings.Contains(contentStr, duplicatePattern) {
t.Errorf("Found duplicate field declaration pattern, fields should be unique")
}
}

func TestWriter_FieldNameCollision(t *testing.T) {
// Test scenario: table with columns that would conflict with generated method names
table := models.InitTable("audit_table", "audit")
table.Columns["id_audit_table"] = &models.Column{
Name: "id_audit_table",
Type: "smallint",
NotNull: true,
IsPrimaryKey: true,
Sequence: 1,
}
table.Columns["table_name"] = &models.Column{
Name: "table_name",
Type: "varchar",
Length: 100,
NotNull: true,
Sequence: 2,
}
table.Columns["table_schema"] = &models.Column{
Name: "table_schema",
Type: "varchar",
Length: 100,
NotNull: true,
Sequence: 3,
}

// Create writer
tmpDir := t.TempDir()
opts := &writers.WriterOptions{
PackageName: "models",
OutputPath: filepath.Join(tmpDir, "test.go"),
}

writer := NewWriter(opts)

err := writer.WriteTable(table)
if err != nil {
t.Fatalf("WriteTable failed: %v", err)
}

// Read the generated file
content, err := os.ReadFile(opts.OutputPath)
if err != nil {
t.Fatalf("Failed to read generated file: %v", err)
}

generated := string(content)

// Verify that TableName field was renamed to TableName_ to avoid collision
if !strings.Contains(generated, "TableName_") {
t.Errorf("Expected field 'TableName_' (with underscore) but not found\nGenerated:\n%s", generated)
}

// Verify the struct tag still references the correct database column
if !strings.Contains(generated, `bun:"table_name,`) {
t.Errorf("Expected bun tag to reference 'table_name' column\nGenerated:\n%s", generated)
}

// Verify the TableName() method still exists and doesn't conflict
if !strings.Contains(generated, "func (m ModelAuditAuditTable) TableName() string") {
t.Errorf("TableName() method should still be generated\nGenerated:\n%s", generated)
}

// Verify NO field named just "TableName" (without underscore)
if strings.Contains(generated, "TableName resolvespec_common") || strings.Contains(generated, "TableName string") {
t.Errorf("Field 'TableName' without underscore should not exist (would conflict with method)\nGenerated:\n%s", generated)
}
}

func TestTypeMapper_SQLTypeToGoType_Bun(t *testing.T) {

@@ -126,7 +126,15 @@ func (w *Writer) tableToDBML(t *models.Table) string {
attrs = append(attrs, "increment")
}
if column.Default != nil {
attrs = append(attrs, fmt.Sprintf("default: `%v`", column.Default))
// Check if default value contains backticks (DBML expressions like `now()`)
defaultStr := fmt.Sprintf("%v", column.Default)
if strings.HasPrefix(defaultStr, "`") && strings.HasSuffix(defaultStr, "`") {
// Already an expression with backticks, use as-is
attrs = append(attrs, fmt.Sprintf("default: %s", defaultStr))
} else {
// Regular value, wrap in single quotes
attrs = append(attrs, fmt.Sprintf("default: '%v'", column.Default))
}
}

if len(attrs) > 0 {
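The branch above separates DBML expression defaults, which arrive already wrapped in backticks (for example `now()`), from literal defaults, which get single quotes. A standalone sketch of that formatting decision, assuming the same backtick convention:

```go
package main

import (
	"fmt"
	"strings"
)

// formatDBMLDefault mirrors the branch above: backtick-wrapped values are
// DBML expressions and pass through as-is; anything else is quoted as a
// literal value.
func formatDBMLDefault(def interface{}) string {
	s := fmt.Sprintf("%v", def)
	if strings.HasPrefix(s, "`") && strings.HasSuffix(s, "`") {
		return fmt.Sprintf("default: %s", s)
	}
	return fmt.Sprintf("default: '%v'", def)
}

func main() {
	fmt.Println(formatDBMLDefault("`now()`")) // default: `now()`
	fmt.Println(formatDBMLDefault("active"))  // default: 'active'
}
```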
@@ -196,7 +196,9 @@ func (w *Writer) writeTableFile(table *models.Table, schema *models.Schema, db *
}

// Generate filename: {tableName}.ts
filename := filepath.Join(w.options.OutputPath, table.Name+".ts")
// Sanitize table name to remove quotes, comments, and invalid characters
safeTableName := writers.SanitizeFilename(table.Name)
filename := filepath.Join(w.options.OutputPath, safeTableName+".ts")
return os.WriteFile(filename, []byte(code), 0644)
}

@@ -4,6 +4,7 @@ import (
"sort"

"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
)

// TemplateData represents the data passed to the template for code generation
@@ -24,6 +25,7 @@ type ModelData struct {
Fields []*FieldData
Config *MethodConfig
PrimaryKeyField string // Name of the primary key field
PrimaryKeyType string // Go type of the primary key field
IDColumnName string // Name of the ID column in database
Prefix string // 3-letter prefix
}
@@ -109,13 +111,17 @@ func NewModelData(table *models.Table, schema string, typeMapper *TypeMapper) *M
tableName = schema + "." + table.Name
}

// Generate model name: singularize and convert to PascalCase
// Generate model name: Model + Schema + Table (all PascalCase)
singularTable := Singularize(table.Name)
modelName := SnakeCaseToPascalCase(singularTable)
tablePart := SnakeCaseToPascalCase(singularTable)

// Add "Model" prefix if not already present
if !hasModelPrefix(modelName) {
modelName = "Model" + modelName
// Include schema name in model name
var modelName string
if schema != "" {
schemaPart := SnakeCaseToPascalCase(schema)
modelName = "Model" + schemaPart + tablePart
} else {
modelName = "Model" + tablePart
}

model := &ModelData{
@@ -131,8 +137,11 @@ func NewModelData(table *models.Table, schema string, typeMapper *TypeMapper) *M
// Find primary key
for _, col := range table.Columns {
if col.IsPrimaryKey {
model.PrimaryKeyField = SnakeCaseToPascalCase(col.Name)
model.IDColumnName = col.Name
// Sanitize column name to remove backticks
safeName := writers.SanitizeStructTagValue(col.Name)
model.PrimaryKeyField = SnakeCaseToPascalCase(safeName)
model.PrimaryKeyType = typeMapper.SQLTypeToGoType(col.Type, col.NotNull)
model.IDColumnName = safeName
break
}
}
@@ -141,6 +150,8 @@ func NewModelData(table *models.Table, schema string, typeMapper *TypeMapper) *M
columns := sortColumns(table.Columns)
for _, col := range columns {
field := columnToField(col, table, typeMapper)
// Check for name collision with generated methods and rename if needed
field.Name = resolveFieldNameCollision(field.Name)
model.Fields = append(model.Fields, field)
}

@@ -149,10 +160,13 @@ func NewModelData(table *models.Table, schema string, typeMapper *TypeMapper) *M

// columnToField converts a models.Column to FieldData
func columnToField(col *models.Column, table *models.Table, typeMapper *TypeMapper) *FieldData {
fieldName := SnakeCaseToPascalCase(col.Name)
// Sanitize column name first to remove backticks before generating field name
safeName := writers.SanitizeStructTagValue(col.Name)
fieldName := SnakeCaseToPascalCase(safeName)
goType := typeMapper.SQLTypeToGoType(col.Type, col.NotNull)
gormTag := typeMapper.BuildGormTag(col, table)
jsonTag := col.Name // Use column name for JSON tag
// Use same sanitized name for JSON tag
jsonTag := safeName

return &FieldData{
Name: fieldName,
@@ -179,9 +193,28 @@ func formatComment(description, comment string) string {
return comment
}

// hasModelPrefix checks if a name already has "Model" prefix
func hasModelPrefix(name string) bool {
return len(name) >= 5 && name[:5] == "Model"
// resolveFieldNameCollision checks if a field name conflicts with generated method names
// and adds an underscore suffix if there's a collision
func resolveFieldNameCollision(fieldName string) string {
// List of method names that are generated by the template
reservedNames := map[string]bool{
"TableName": true,
"TableNameOnly": true,
"SchemaName": true,
"GetID": true,
"GetIDStr": true,
"SetID": true,
"UpdateID": true,
"GetIDName": true,
"GetPrefix": true,
}

// Check if field name conflicts with a reserved method name
if reservedNames[fieldName] {
return fieldName + "_"
}

return fieldName
}

// sortColumns sorts columns by sequence, then by name

@@ -62,7 +62,7 @@ func (m {{.Name}}) SetID(newid int64) {
{{if and .Config.GenerateUpdateID .PrimaryKeyField}}
// UpdateID updates the primary key value
func (m *{{.Name}}) UpdateID(newid int64) {
m.{{.PrimaryKeyField}} = int32(newid)
m.{{.PrimaryKeyField}} = {{.PrimaryKeyType}}(newid)
}
{{end}}
{{if and .Config.GenerateGetIDName .IDColumnName}}
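With the cast now driven by `{{.PrimaryKeyType}}`, the rendered method matches the field's actual type instead of assuming int32. A hypothetical rendering for an int16 primary key (the type and field names here are illustrative, not taken from real generated output):

```go
package main

import "fmt"

// Illustrative model with an int16 primary key.
type ModelPublicTestTable struct {
	ID int16
}

// What the corrected template renders for this model: the int64 parameter
// is cast to the primary key field's own type.
func (m *ModelPublicTestTable) UpdateID(newid int64) {
	m.ID = int16(newid)
}

func main() {
	m := &ModelPublicTestTable{}
	m.UpdateID(42)
	fmt.Println(m.ID) // 42
}
```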
@@ -5,6 +5,7 @@ import (
"strings"

"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
)

// TypeMapper handles type conversions between SQL and Go types
@@ -199,12 +200,15 @@ func (tm *TypeMapper) BuildGormTag(column *models.Column, table *models.Table) s
var parts []string

// Always include column name (lowercase as per user requirement)
parts = append(parts, fmt.Sprintf("column:%s", column.Name))
// Sanitize to remove backticks which would break struct tag syntax
safeName := writers.SanitizeStructTagValue(column.Name)
parts = append(parts, fmt.Sprintf("column:%s", safeName))

// Add type if specified
if column.Type != "" {
// Include length, precision, scale if present
typeStr := column.Type
// Sanitize type to remove backticks
typeStr := writers.SanitizeStructTagValue(column.Type)
if column.Length > 0 {
typeStr = fmt.Sprintf("%s(%d)", typeStr, column.Length)
} else if column.Precision > 0 {
@@ -234,7 +238,9 @@ func (tm *TypeMapper) BuildGormTag(column *models.Column, table *models.Table) s

// Default value
if column.Default != nil {
parts = append(parts, fmt.Sprintf("default:%v", column.Default))
// Sanitize default value to remove backticks
safeDefault := writers.SanitizeStructTagValue(fmt.Sprintf("%v", column.Default))
parts = append(parts, fmt.Sprintf("default:%s", safeDefault))
}

// Check for unique constraint
@@ -331,5 +337,5 @@ func (tm *TypeMapper) NeedsFmtImport(generateGetIDStr bool) bool {

// GetSQLTypesImport returns the import path for sql_types
func (tm *TypeMapper) GetSQLTypesImport() string {
return "github.com/bitechdev/ResolveSpec/pkg/common/sql_types"
return "github.com/bitechdev/ResolveSpec/pkg/spectypes"
}

@@ -4,6 +4,7 @@ import (
"fmt"
"go/format"
"os"
"os/exec"
"path/filepath"
"strings"

@@ -121,7 +122,16 @@ func (w *Writer) writeSingleFile(db *models.Database) error {
}

// Write output
return w.writeOutput(formatted)
if err := w.writeOutput(formatted); err != nil {
return err
}

// Run go fmt on the output file
if w.options.OutputPath != "" {
w.runGoFmt(w.options.OutputPath)
}

return nil
}

// writeMultiFile writes each table to a separate file
@@ -201,13 +211,19 @@ func (w *Writer) writeMultiFile(db *models.Database) error {
}

// Generate filename: sql_{schema}_{table}.go
filename := fmt.Sprintf("sql_%s_%s.go", schema.Name, table.Name)
// Sanitize schema and table names to remove quotes, comments, and invalid characters
safeSchemaName := writers.SanitizeFilename(schema.Name)
safeTableName := writers.SanitizeFilename(table.Name)
filename := fmt.Sprintf("sql_%s_%s.go", safeSchemaName, safeTableName)
filepath := filepath.Join(w.options.OutputPath, filename)

// Write file
if err := os.WriteFile(filepath, []byte(formatted), 0644); err != nil {
return fmt.Errorf("failed to write file %s: %w", filename, err)
}

// Run go fmt on the generated file
w.runGoFmt(filepath)
}
}

@@ -216,6 +232,9 @@ func (w *Writer) writeMultiFile(db *models.Database) error {

// addRelationshipFields adds relationship fields to the model based on foreign keys
func (w *Writer) addRelationshipFields(modelData *ModelData, table *models.Table, schema *models.Schema, db *models.Database) {
// Track used field names to detect duplicates
usedFieldNames := make(map[string]int)

// For each foreign key in this table, add a belongs-to relationship
for _, constraint := range table.Constraints {
if constraint.Type != models.ForeignKeyConstraint {
@@ -229,8 +248,9 @@ func (w *Writer) addRelationshipFields(modelData *ModelData, table *models.Table
}

// Create relationship field (belongs-to)
refModelName := w.getModelName(constraint.ReferencedTable)
fieldName := w.generateRelationshipFieldName(constraint.ReferencedTable)
refModelName := w.getModelName(constraint.ReferencedSchema, constraint.ReferencedTable)
fieldName := w.generateBelongsToFieldName(constraint)
fieldName = w.ensureUniqueFieldName(fieldName, usedFieldNames)
relationTag := w.typeMapper.BuildRelationshipTag(constraint, false)

modelData.AddRelationshipField(&FieldData{
@@ -257,8 +277,9 @@ func (w *Writer) addRelationshipFields(modelData *ModelData, table *models.Table
// Check if this constraint references our table
if constraint.ReferencedTable == table.Name && constraint.ReferencedSchema == schema.Name {
// Add has-many relationship
otherModelName := w.getModelName(otherTable.Name)
fieldName := w.generateRelationshipFieldName(otherTable.Name) + "s" // Pluralize
otherModelName := w.getModelName(otherSchema.Name, otherTable.Name)
fieldName := w.generateHasManyFieldName(constraint, otherSchema.Name, otherTable.Name)
fieldName = w.ensureUniqueFieldName(fieldName, usedFieldNames)
relationTag := w.typeMapper.BuildRelationshipTag(constraint, true)

modelData.AddRelationshipField(&FieldData{
@@ -289,22 +310,77 @@ func (w *Writer) findTable(schemaName, tableName string, db *models.Database) *m
return nil
}

// getModelName generates the model name from a table name
func (w *Writer) getModelName(tableName string) string {
// getModelName generates the model name from schema and table name
func (w *Writer) getModelName(schemaName, tableName string) string {
singular := Singularize(tableName)
modelName := SnakeCaseToPascalCase(singular)
tablePart := SnakeCaseToPascalCase(singular)

if !hasModelPrefix(modelName) {
modelName = "Model" + modelName
// Include schema name in model name
var modelName string
if schemaName != "" {
schemaPart := SnakeCaseToPascalCase(schemaName)
modelName = "Model" + schemaPart + tablePart
} else {
modelName = "Model" + tablePart
}

return modelName
}

// generateRelationshipFieldName generates a field name for a relationship
func (w *Writer) generateRelationshipFieldName(tableName string) string {
// Use just the prefix (3 letters) for relationship fields
return GeneratePrefix(tableName)
// generateBelongsToFieldName generates a field name for belongs-to relationships
// Uses the foreign key column name for uniqueness
func (w *Writer) generateBelongsToFieldName(constraint *models.Constraint) string {
// Use the foreign key column name to ensure uniqueness
// If there are multiple columns, use the first one
if len(constraint.Columns) > 0 {
columnName := constraint.Columns[0]
// Convert to PascalCase for proper Go field naming
// e.g., "rid_filepointer_request" -> "RelRIDFilepointerRequest"
return "Rel" + SnakeCaseToPascalCase(columnName)
}

// Fallback to table-based prefix if no columns defined
return "Rel" + GeneratePrefix(constraint.ReferencedTable)
}

// generateHasManyFieldName generates a field name for has-many relationships
// Uses the foreign key column name + source table name to avoid duplicates
func (w *Writer) generateHasManyFieldName(constraint *models.Constraint, sourceSchemaName, sourceTableName string) string {
// For has-many, we need to include the source table name to avoid duplicates
// e.g., multiple tables referencing the same column on this table
if len(constraint.Columns) > 0 {
columnName := constraint.Columns[0]
// Get the model name for the source table (pluralized)
sourceModelName := w.getModelName(sourceSchemaName, sourceTableName)
// Remove "Model" prefix if present
sourceModelName = strings.TrimPrefix(sourceModelName, "Model")

// Convert column to PascalCase and combine with source table
// e.g., "rid_api_provider" + "Login" -> "RelRIDAPIProviderLogins"
columnPart := SnakeCaseToPascalCase(columnName)
return "Rel" + columnPart + Pluralize(sourceModelName)
}

// Fallback to table-based naming
sourceModelName := w.getModelName(sourceSchemaName, sourceTableName)
sourceModelName = strings.TrimPrefix(sourceModelName, "Model")
return "Rel" + Pluralize(sourceModelName)
}

// ensureUniqueFieldName ensures a field name is unique by adding numeric suffixes if needed
func (w *Writer) ensureUniqueFieldName(fieldName string, usedNames map[string]int) string {
originalName := fieldName
count := usedNames[originalName]

if count > 0 {
// Name is already used, add numeric suffix
fieldName = fmt.Sprintf("%s%d", originalName, count+1)
}

// Increment the counter for this base name
usedNames[originalName]++

return fieldName
}

// getPackageName returns the package name from options or defaults to "models"
@@ -335,6 +411,15 @@ func (w *Writer) writeOutput(content string) error {
return nil
}

// runGoFmt runs go fmt on the specified file
func (w *Writer) runGoFmt(filepath string) {
cmd := exec.Command("gofmt", "-w", filepath)
if err := cmd.Run(); err != nil {
// Don't fail the whole operation if gofmt fails, just warn
fmt.Fprintf(os.Stderr, "Warning: failed to run gofmt on %s: %v\n", filepath, err)
}
}

// shouldUseMultiFile determines whether to use multi-file mode based on metadata or output path
func (w *Writer) shouldUseMultiFile() bool {
// Check if multi_file is explicitly set in metadata

@@ -66,7 +66,7 @@ func TestWriter_WriteTable(t *testing.T) {
// Verify key elements are present
expectations := []string{
"package models",
"type ModelUser struct",
"type ModelPublicUser struct",
"ID",
"int64",
"Email",
@@ -75,9 +75,9 @@ func TestWriter_WriteTable(t *testing.T) {
"time.Time",
"gorm:\"column:id",
"gorm:\"column:email",
"func (m ModelUser) TableName() string",
"func (m ModelPublicUser) TableName() string",
"return \"public.users\"",
"func (m ModelUser) GetID() int64",
"func (m ModelPublicUser) GetID() int64",
}

for _, expected := range expectations {
@@ -164,9 +164,437 @@ func TestWriter_WriteDatabase_MultiFile(t *testing.T) {
t.Fatalf("Failed to read posts file: %v", err)
}

if !strings.Contains(string(postsContent), "USE *ModelUser") {
// Relationship field should be present
t.Logf("Posts content:\n%s", string(postsContent))
postsStr := string(postsContent)

// Verify relationship is present with new naming convention
// Should now be RelUserID (belongs-to) instead of USE
if !strings.Contains(postsStr, "RelUserID") {
t.Errorf("Missing relationship field RelUserID (new naming convention)")
}

// Check users file contains has-many relationship
usersContent, err := os.ReadFile(filepath.Join(tmpDir, "sql_public_users.go"))
if err != nil {
t.Fatalf("Failed to read users file: %v", err)
}

usersStr := string(usersContent)

// Should have RelUserIDPublicPosts (has-many) field - includes schema prefix
if !strings.Contains(usersStr, "RelUserIDPublicPosts") {
t.Errorf("Missing has-many relationship field RelUserIDPublicPosts")
}
}

func TestWriter_MultipleReferencesToSameTable(t *testing.T) {
// Test scenario: api_event table with multiple foreign keys to filepointer table
db := models.InitDatabase("testdb")
schema := models.InitSchema("org")

// Filepointer table
filepointer := models.InitTable("filepointer", "org")
filepointer.Columns["id_filepointer"] = &models.Column{
Name: "id_filepointer",
Type: "bigserial",
NotNull: true,
IsPrimaryKey: true,
}
schema.Tables = append(schema.Tables, filepointer)

// API event table with two foreign keys to filepointer
apiEvent := models.InitTable("api_event", "org")
apiEvent.Columns["id_api_event"] = &models.Column{
Name: "id_api_event",
Type: "bigserial",
NotNull: true,
IsPrimaryKey: true,
}
apiEvent.Columns["rid_filepointer_request"] = &models.Column{
Name: "rid_filepointer_request",
Type: "bigint",
NotNull: false,
}
apiEvent.Columns["rid_filepointer_response"] = &models.Column{
Name: "rid_filepointer_response",
Type: "bigint",
NotNull: false,
}

// Add constraints
apiEvent.Constraints["fk_request"] = &models.Constraint{
Name: "fk_request",
Type: models.ForeignKeyConstraint,
Columns: []string{"rid_filepointer_request"},
ReferencedTable: "filepointer",
ReferencedSchema: "org",
ReferencedColumns: []string{"id_filepointer"},
}
apiEvent.Constraints["fk_response"] = &models.Constraint{
Name: "fk_response",
Type: models.ForeignKeyConstraint,
Columns: []string{"rid_filepointer_response"},
ReferencedTable: "filepointer",
ReferencedSchema: "org",
ReferencedColumns: []string{"id_filepointer"},
}

schema.Tables = append(schema.Tables, apiEvent)
db.Schemas = append(db.Schemas, schema)

// Create writer
tmpDir := t.TempDir()
opts := &writers.WriterOptions{
PackageName: "models",
OutputPath: tmpDir,
Metadata: map[string]interface{}{
"multi_file": true,
},
}

writer := NewWriter(opts)
err := writer.WriteDatabase(db)
if err != nil {
t.Fatalf("WriteDatabase failed: %v", err)
}

// Read the api_event file
apiEventContent, err := os.ReadFile(filepath.Join(tmpDir, "sql_org_api_event.go"))
if err != nil {
t.Fatalf("Failed to read api_event file: %v", err)
}

contentStr := string(apiEventContent)

// Verify both relationships have unique names based on column names
expectations := []struct {
fieldName string
tag string
}{
{"RelRIDFilepointerRequest", "foreignKey:RIDFilepointerRequest"},
{"RelRIDFilepointerResponse", "foreignKey:RIDFilepointerResponse"},
}

for _, exp := range expectations {
if !strings.Contains(contentStr, exp.fieldName) {
t.Errorf("Missing relationship field: %s\nGenerated:\n%s", exp.fieldName, contentStr)
}
if !strings.Contains(contentStr, exp.tag) {
t.Errorf("Missing relationship tag: %s\nGenerated:\n%s", exp.tag, contentStr)
}
}

// Verify NO duplicate field names (old behavior would create duplicate "FIL" fields)
if strings.Contains(contentStr, "FIL *ModelFilepointer") {
t.Errorf("Found old prefix-based naming (FIL), should use column-based naming")
}

// Also verify has-many relationships on filepointer table
filepointerContent, err := os.ReadFile(filepath.Join(tmpDir, "sql_org_filepointer.go"))
if err != nil {
t.Fatalf("Failed to read filepointer file: %v", err)
}

filepointerStr := string(filepointerContent)

// Should have two different has-many relationships with unique names
hasManyExpectations := []string{
"RelRIDFilepointerRequestOrgAPIEvents", // Has many via rid_filepointer_request
"RelRIDFilepointerResponseOrgAPIEvents", // Has many via rid_filepointer_response
}

for _, exp := range hasManyExpectations {
if !strings.Contains(filepointerStr, exp) {
t.Errorf("Missing has-many relationship field: %s\nGenerated:\n%s", exp, filepointerStr)
}
}
}

func TestWriter_MultipleHasManyRelationships(t *testing.T) {
// Test scenario: api_provider table referenced by multiple tables via rid_api_provider
db := models.InitDatabase("testdb")
schema := models.InitSchema("org")

// Owner table
owner := models.InitTable("owner", "org")
owner.Columns["id_owner"] = &models.Column{
Name: "id_owner",
Type: "bigserial",
NotNull: true,
IsPrimaryKey: true,
}
schema.Tables = append(schema.Tables, owner)

// API Provider table
apiProvider := models.InitTable("api_provider", "org")
apiProvider.Columns["id_api_provider"] = &models.Column{
Name: "id_api_provider",
Type: "bigserial",
NotNull: true,
IsPrimaryKey: true,
}
apiProvider.Columns["rid_owner"] = &models.Column{
Name: "rid_owner",
Type: "bigint",
NotNull: true,
}
apiProvider.Constraints["fk_owner"] = &models.Constraint{
Name: "fk_owner",
Type: models.ForeignKeyConstraint,
Columns: []string{"rid_owner"},
ReferencedTable: "owner",
ReferencedSchema: "org",
ReferencedColumns: []string{"id_owner"},
}
schema.Tables = append(schema.Tables, apiProvider)

// Login table
login := models.InitTable("login", "org")
login.Columns["id_login"] = &models.Column{
Name: "id_login",
Type: "bigserial",
NotNull: true,
IsPrimaryKey: true,
}
login.Columns["rid_api_provider"] = &models.Column{
Name: "rid_api_provider",
Type: "bigint",
NotNull: true,
}
login.Constraints["fk_api_provider"] = &models.Constraint{
Name: "fk_api_provider",
Type: models.ForeignKeyConstraint,
Columns: []string{"rid_api_provider"},
ReferencedTable: "api_provider",
ReferencedSchema: "org",
ReferencedColumns: []string{"id_api_provider"},
}
schema.Tables = append(schema.Tables, login)

// Filepointer table
filepointer := models.InitTable("filepointer", "org")
filepointer.Columns["id_filepointer"] = &models.Column{
Name: "id_filepointer",
Type: "bigserial",
NotNull: true,
IsPrimaryKey: true,
}
filepointer.Columns["rid_api_provider"] = &models.Column{
Name: "rid_api_provider",
Type: "bigint",
NotNull: true,
}
filepointer.Constraints["fk_api_provider"] = &models.Constraint{
Name: "fk_api_provider",
Type: models.ForeignKeyConstraint,
Columns: []string{"rid_api_provider"},
ReferencedTable: "api_provider",
ReferencedSchema: "org",
ReferencedColumns: []string{"id_api_provider"},
}
schema.Tables = append(schema.Tables, filepointer)

// API Event table
apiEvent := models.InitTable("api_event", "org")
apiEvent.Columns["id_api_event"] = &models.Column{
Name: "id_api_event",
Type: "bigserial",
NotNull: true,
IsPrimaryKey: true,
}
apiEvent.Columns["rid_api_provider"] = &models.Column{
Name: "rid_api_provider",
Type: "bigint",
NotNull: true,
}
apiEvent.Constraints["fk_api_provider"] = &models.Constraint{
Name: "fk_api_provider",
Type: models.ForeignKeyConstraint,
Columns: []string{"rid_api_provider"},
ReferencedTable: "api_provider",
ReferencedSchema: "org",
ReferencedColumns: []string{"id_api_provider"},
}
schema.Tables = append(schema.Tables, apiEvent)

db.Schemas = append(db.Schemas, schema)

// Create writer
tmpDir := t.TempDir()
opts := &writers.WriterOptions{
PackageName: "models",
OutputPath: tmpDir,
Metadata: map[string]interface{}{
"multi_file": true,
},
}

writer := NewWriter(opts)
err := writer.WriteDatabase(db)
if err != nil {
t.Fatalf("WriteDatabase failed: %v", err)
}

// Read the api_provider file
apiProviderContent, err := os.ReadFile(filepath.Join(tmpDir, "sql_org_api_provider.go"))
if err != nil {
t.Fatalf("Failed to read api_provider file: %v", err)
}

contentStr := string(apiProviderContent)

// Verify all has-many relationships have unique names
hasManyExpectations := []string{
"RelRIDAPIProviderOrgLogins", // Has many via Login
"RelRIDAPIProviderOrgFilepointers", // Has many via Filepointer
"RelRIDAPIProviderOrgAPIEvents", // Has many via APIEvent
"RelRIDOwner", // Belongs to via rid_owner
}

for _, exp := range hasManyExpectations {
if !strings.Contains(contentStr, exp) {
t.Errorf("Missing relationship field: %s\nGenerated:\n%s", exp, contentStr)
}
}

// Verify NO duplicate field names
// Count occurrences of "RelRIDAPIProvider" fields - should have 3 unique ones
count := strings.Count(contentStr, "RelRIDAPIProvider")
if count != 3 {
t.Errorf("Expected 3 RelRIDAPIProvider* fields, found %d\nGenerated:\n%s", count, contentStr)
}

// Verify no duplicate declarations (would cause compilation error)
duplicatePattern := "RelRIDAPIProviders []*Model"
if strings.Contains(contentStr, duplicatePattern) {
t.Errorf("Found duplicate field declaration pattern, fields should be unique")
}
}

func TestWriter_FieldNameCollision(t *testing.T) {
// Test scenario: table with columns that would conflict with generated method names
table := models.InitTable("audit_table", "audit")
table.Columns["id_audit_table"] = &models.Column{
Name: "id_audit_table",
Type: "smallint",
NotNull: true,
IsPrimaryKey: true,
Sequence: 1,
}
table.Columns["table_name"] = &models.Column{
Name: "table_name",
Type: "varchar",
Length: 100,
NotNull: true,
Sequence: 2,
}
table.Columns["table_schema"] = &models.Column{
Name: "table_schema",
Type: "varchar",
Length: 100,
NotNull: true,
Sequence: 3,
}

// Create writer
tmpDir := t.TempDir()
opts := &writers.WriterOptions{
PackageName: "models",
OutputPath: filepath.Join(tmpDir, "test.go"),
}

writer := NewWriter(opts)

err := writer.WriteTable(table)
if err != nil {
t.Fatalf("WriteTable failed: %v", err)
}

// Read the generated file
content, err := os.ReadFile(opts.OutputPath)
if err != nil {
t.Fatalf("Failed to read generated file: %v", err)
}

generated := string(content)

// Verify that TableName field was renamed to TableName_ to avoid collision
if !strings.Contains(generated, "TableName_") {
t.Errorf("Expected field 'TableName_' (with underscore) but not found\nGenerated:\n%s", generated)
}

// Verify the struct tag still references the correct database column
if !strings.Contains(generated, `gorm:"column:table_name;`) {
t.Errorf("Expected gorm tag to reference 'table_name' column\nGenerated:\n%s", generated)
}

// Verify the TableName() method still exists and doesn't conflict
if !strings.Contains(generated, "func (m ModelAuditAuditTable) TableName() string") {
t.Errorf("TableName() method should still be generated\nGenerated:\n%s", generated)
}

// Verify NO field named just "TableName" (without underscore)
if strings.Contains(generated, "TableName sql_types") || strings.Contains(generated, "TableName string") {
t.Errorf("Field 'TableName' without underscore should not exist (would conflict with method)\nGenerated:\n%s", generated)
}
}

func TestWriter_UpdateIDTypeSafety(t *testing.T) {
// Test scenario: tables with different primary key types
tests := []struct {
name string
pkType string
expectedPK string
castType string
}{
{"int32_pk", "int", "int32", "int32(newid)"},
{"int16_pk", "smallint", "int16", "int16(newid)"},
{"int64_pk", "bigint", "int64", "int64(newid)"},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
table := models.InitTable("test_table", "public")
table.Columns["id"] = &models.Column{
Name: "id",
Type: tt.pkType,
NotNull: true,
IsPrimaryKey: true,
}

tmpDir := t.TempDir()
opts := &writers.WriterOptions{
PackageName: "models",
OutputPath: filepath.Join(tmpDir, "test.go"),
}

writer := NewWriter(opts)
err := writer.WriteTable(table)
if err != nil {
t.Fatalf("WriteTable failed: %v", err)
}

content, err := os.ReadFile(opts.OutputPath)
if err != nil {
t.Fatalf("Failed to read generated file: %v", err)
}

generated := string(content)

// Verify UpdateID method has correct type cast
if !strings.Contains(generated, tt.castType) {
t.Errorf("Expected UpdateID to cast to %s\nGenerated:\n%s", tt.castType, generated)
}

// Verify no invalid int32(newid) for non-int32 types
if tt.expectedPK != "int32" && strings.Contains(generated, "int32(newid)") {
t.Errorf("UpdateID should not cast to int32 for %s type\nGenerated:\n%s", tt.pkType, generated)
}

// Verify UpdateID parameter is int64 (for consistency)
if !strings.Contains(generated, "UpdateID(newid int64)") {
t.Errorf("UpdateID should accept int64 parameter\nGenerated:\n%s", generated)
}
})
}
}
217
pkg/writers/pgsql/NAMING_CONVENTIONS.md
Normal file
@@ -0,0 +1,217 @@
# PostgreSQL Naming Conventions

Standardized naming rules for all database objects in RelSpec PostgreSQL output.

## Quick Reference

| Object Type | Prefix | Format | Example |
| ----------------- | ----------- | ---------------------------------- | ------------------------ |
| Primary Key | `pk_` | `pk_<schema>_<table>` | `pk_public_users` |
| Foreign Key | `fk_` | `fk_<table>_<referenced_table>` | `fk_posts_users` |
| Unique Constraint | `uk_` | `uk_<table>_<column>` | `uk_users_email` |
| Unique Index | `uidx_` | `uidx_<table>_<column>` | `uidx_users_email` |
| Regular Index | `idx_` | `idx_<table>_<column>` | `idx_posts_user_id` |
| Check Constraint | `chk_` | `chk_<table>_<constraint_purpose>` | `chk_users_age_positive` |
| Sequence | `identity_` | `identity_<table>_<column>` | `identity_users_id` |
| Trigger | `t_` | `t_<purpose>_<table>` | `t_audit_users` |
| Trigger Function | `tf_` | `tf_<purpose>_<table>` | `tf_audit_users` |
## Naming Rules by Object Type
|
||||
|
||||
### Primary Keys
|
||||
|
||||
**Pattern:** `pk_<schema>_<table>`
|
||||
|
||||
- Include schema name to avoid collisions across schemas
|
||||
- Use lowercase, snake_case format
|
||||
- Examples:
|
||||
- `pk_public_users`
|
||||
- `pk_audit_audit_log`
|
||||
- `pk_staging_temp_data`
|
||||
|
||||
### Foreign Keys
|
||||
|
||||
**Pattern:** `fk_<table>_<referenced_table>`
|
||||
|
||||
- Reference the table containing the FK followed by the referenced table
|
||||
- Use lowercase, snake_case format
|
||||
- Do NOT include column names in standard FK constraints
|
||||
- Examples:
|
||||
- `fk_posts_users` (posts.user_id → users.id)
|
||||
- `fk_comments_posts` (comments.post_id → posts.id)
|
||||
- `fk_order_items_orders` (order_items.order_id → orders.id)
|
||||
|
||||
### Unique Constraints
|
||||
|
||||
**Pattern:** `uk_<table>_<column>`
|
||||
|
||||
- Use `uk_` prefix strictly for database constraints (CONSTRAINT type)
|
||||
- Include column name for clarity
|
||||
- Examples:
|
||||
- `uk_users_email`
|
||||
- `uk_users_username`
|
||||
- `uk_products_sku`
|
||||
|
||||
### Unique Indexes
|
||||
|
||||
**Pattern:** `uidx_<table>_<column>`
|
||||
|
||||
- Use `uidx_` prefix strictly for index type objects
|
||||
- Distinguished from constraints for clarity and implementation flexibility
|
||||
- Examples:
|
||||
- `uidx_users_email`
|
||||
- `uidx_sessions_token`
|
||||
- `uidx_api_keys_key`
|
||||
|
||||
### Regular Indexes
|
||||
|
||||
**Pattern:** `idx_<table>_<column>`
|
||||
|
||||
- Standard indexes for query optimization
|
||||
- Single column: `idx_<table>_<column>`
|
||||
- Examples:
|
||||
- `idx_posts_user_id`
|
||||
- `idx_orders_created_at`
|
||||
- `idx_users_status`
|
||||
|
||||
### Check Constraints
|
||||
|
||||
**Pattern:** `chk_<table>_<constraint_purpose>`
|
||||
|
||||
- Describe the constraint validation purpose
|
||||
- Use lowercase, snake_case for the purpose
|
||||
- Examples:
|
||||
- `chk_users_age_positive` (CHECK (age > 0))
|
||||
- `chk_orders_quantity_positive` (CHECK (quantity > 0))
|
||||
- `chk_products_price_valid` (CHECK (price >= 0))
|
||||
- `chk_users_status_enum` (CHECK (status IN ('active', 'inactive')))
|
||||
|
||||
### Sequences
|
||||
|
||||
**Pattern:** `identity_<table>_<column>`
|
||||
|
||||
- Used for SERIAL/IDENTITY columns
|
||||
- Explicitly named for clarity and management
|
||||
- Examples:
|
||||
- `identity_users_id`
|
||||
- `identity_posts_id`
|
||||
- `identity_transactions_id`
|
||||
|
||||
### Triggers
|
||||
|
||||
**Pattern:** `t_<purpose>_<table>`
|
||||
|
||||
- Include purpose before table name
|
||||
- Lowercase, snake_case format
|
||||
- Examples:
|
||||
- `t_audit_users` (audit trigger on users table)
|
||||
- `t_update_timestamp_posts` (timestamp update trigger on posts)
|
||||
- `t_validate_orders` (validation trigger on orders)
|
||||
|
||||
### Trigger Functions
|
||||
|
||||
**Pattern:** `tf_<purpose>_<table>`
|
||||
|
||||
- Pair with trigger naming convention
|
||||
- Use `tf_` prefix to distinguish from triggers themselves
|
||||
- Examples:
|
||||
- `tf_audit_users` (function for t_audit_users)
|
||||
- `tf_update_timestamp_posts` (function for t_update_timestamp_posts)
|
||||
- `tf_validate_orders` (function for t_validate_orders)
|
||||
|
||||
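These rules reduce to simple string templates. A minimal Go sketch of name builders that follow them (the helper names are illustrative, not part of RelSpec's API):

```go
package naming

import "strings"

// join lowercases the parts and joins them with underscores,
// matching the lowercase snake_case rule above.
func join(parts ...string) string {
    return strings.ToLower(strings.Join(parts, "_"))
}

func PrimaryKeyName(schema, table string) string { return join("pk", schema, table) }

func ForeignKeyName(table, refTable string) string { return join("fk", table, refTable) }

// Composite objects append all column names in order.
func UniqueConstraintName(table string, cols ...string) string {
    return join(append([]string{"uk", table}, cols...)...)
}

func IndexName(table string, cols ...string) string {
    return join(append([]string{"idx", table}, cols...)...)
}

func TriggerName(purpose, table string) string { return join("t", purpose, table) }

func TriggerFunctionName(purpose, table string) string { return join("tf", purpose, table) }
```

For example, `PrimaryKeyName("public", "users")` yields `pk_public_users`, and `IndexName("orders", "user_id", "created_at")` yields `idx_orders_user_id_created_at`, matching the composite rules in the next section.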
## Multi-Column Objects

### Composite Primary Keys

**Pattern:** `pk_<schema>_<table>`

- Same as single-column PKs
- Example: `pk_public_order_items` (composite key on order_id + item_id)

### Composite Unique Constraints

**Pattern:** `uk_<table>_<column1>_<column2>_[...]`

- Append all column names in order
- Examples:
  - `uk_users_email_domain` (UNIQUE(email, domain))
  - `uk_inventory_warehouse_sku` (UNIQUE(warehouse_id, sku))

### Composite Unique Indexes

**Pattern:** `uidx_<table>_<column1>_<column2>_[...]`

- Append all column names in order
- Examples:
  - `uidx_users_first_name_last_name` (UNIQUE INDEX on first_name, last_name)
  - `uidx_sessions_user_id_device_id` (UNIQUE INDEX on user_id, device_id)

### Composite Regular Indexes

**Pattern:** `idx_<table>_<column1>_<column2>_[...]`

- Append all column names in order
- List columns in typical query filter order
- Examples:
  - `idx_orders_user_id_created_at` (filter by user, then sort by created_at)
  - `idx_logs_level_timestamp` (filter by level, then by timestamp)

## Special Cases & Conventions

### Audit Trail Tables

- Audit table naming: `<original_table>_audit` or `audit_<original_table>`
- Audit indexes follow standard pattern: `idx_<audit_table>_<column>`
- Examples:
  - Users table audit: `users_audit` with `idx_users_audit_tablename`, `idx_users_audit_changedate`
  - Posts table audit: `posts_audit` with `idx_posts_audit_tablename`, `idx_posts_audit_changedate`

### Temporal/Versioning Tables

- Use suffix `_history` or `_versions` if needed
- Apply standard naming rules with the full table name
- Examples:
  - `idx_users_history_user_id`
  - `uk_posts_versions_version_number`

### Schema-Specific Objects

- Always qualify with schema when needed: `pk_<schema>_<table>`
- Multiple schemas allowed: `pk_public_users`, `pk_staging_users`

### Reserved Words & Special Names

- Avoid PostgreSQL reserved keywords in object names
- If column/table names conflict, use quoted identifiers in DDL
- Naming convention rules still apply to the logical name

### Generated/Anonymous Indexes

- If an index lacks explicit naming, default to: `idx_<schema>_<table>`
- Should be replaced with explicit names following standards
- Examples (to be renamed):
  - `idx_public_users` → should be `idx_users_<column>`

## Implementation Notes

### Code Generation

- Names are always lowercase in generated SQL
- Underscore separators are required

### Migration Safety

- Do NOT rename objects after creation without explicit migration
- Names should be consistent across all schema versions
- Test generated DDL against PostgreSQL before deployment

### Testing

- Ensure consistency across all table and constraint generation
- Test with reserved words to verify escaping

## Related Documentation

- PostgreSQL Identifier Rules: https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-IDENTIFIERS
- Constraint Documentation: https://www.postgresql.org/docs/current/ddl-constraints.html
- Index Documentation: https://www.postgresql.org/docs/current/indexes.html
@@ -8,6 +8,7 @@ import (
    "strings"

    "git.warky.dev/wdevs/relspecgo/pkg/models"
    "git.warky.dev/wdevs/relspecgo/pkg/pgsql"
    "git.warky.dev/wdevs/relspecgo/pkg/writers"
)

@@ -335,7 +336,7 @@ func (w *MigrationWriter) generateAlterTableScripts(schema *models.Schema, model
            SchemaName: schema.Name,
            TableName:  modelTable.Name,
            ColumnName: modelCol.Name,
            ColumnType: modelCol.Type,
            ColumnType: pgsql.ConvertSQLType(modelCol.Type),
            Default:    defaultVal,
            NotNull:    modelCol.NotNull,
        })
@@ -359,7 +360,7 @@ func (w *MigrationWriter) generateAlterTableScripts(schema *models.Schema, model
            SchemaName: schema.Name,
            TableName:  modelTable.Name,
            ColumnName: modelCol.Name,
            NewType:    modelCol.Type,
            NewType:    pgsql.ConvertSQLType(modelCol.Type),
        })
        if err != nil {
            return nil, err
@@ -427,9 +428,11 @@ func (w *MigrationWriter) generateIndexScripts(model *models.Schema, current *mo
    for _, modelTable := range model.Tables {
        currentTable := currentTables[strings.ToLower(modelTable.Name)]

        // Process primary keys first
        // Process primary keys first - check explicit constraints
        foundExplicitPK := false
        for constraintName, constraint := range modelTable.Constraints {
            if constraint.Type == models.PrimaryKeyConstraint {
                foundExplicitPK = true
                shouldCreate := true

                if currentTable != nil {
@@ -464,6 +467,53 @@ func (w *MigrationWriter) generateIndexScripts(model *models.Schema, current *mo
            }
        }

        // If no explicit PK constraint, check for columns with IsPrimaryKey = true
        if !foundExplicitPK {
            pkColumns := []string{}
            for _, col := range modelTable.Columns {
                if col.IsPrimaryKey {
                    pkColumns = append(pkColumns, col.SQLName())
                }
            }
            if len(pkColumns) > 0 {
                sort.Strings(pkColumns)
                constraintName := fmt.Sprintf("pk_%s_%s", model.SQLName(), modelTable.SQLName())
                shouldCreate := true

                if currentTable != nil {
                    // Check if a PK constraint already exists (by any name)
                    for _, constraint := range currentTable.Constraints {
                        if constraint.Type == models.PrimaryKeyConstraint {
                            shouldCreate = false
                            break
                        }
                    }
                }

                if shouldCreate {
                    sql, err := w.executor.ExecuteCreatePrimaryKey(CreatePrimaryKeyData{
                        SchemaName:     model.Name,
                        TableName:      modelTable.Name,
                        ConstraintName: constraintName,
                        Columns:        strings.Join(pkColumns, ", "),
                    })
                    if err != nil {
                        return nil, err
                    }

                    script := MigrationScript{
                        ObjectName: fmt.Sprintf("%s.%s.%s", model.Name, modelTable.Name, constraintName),
                        ObjectType: "create primary key",
                        Schema:     model.Name,
                        Priority:   160,
                        Sequence:   len(scripts),
                        Body:       sql,
                    }
                    scripts = append(scripts, script)
                }
            }
        }

        // Process indexes
        for indexName, modelIndex := range modelTable.Indexes {
            // Skip primary key indexes
@@ -703,7 +753,7 @@ func (w *MigrationWriter) generateAuditScripts(schema *models.Schema, auditConfi
    }

    // Generate audit function
    funcName := fmt.Sprintf("ft_audit_%s", table.Name)
    funcName := fmt.Sprintf("tf_audit_%s", table.Name)
    funcData := BuildAuditFunctionData(schema.Name, table, pk, config, auditSchema, auditConfig.UserFunction)

    funcSQL, err := w.executor.ExecuteAuditFunction(funcData)

@@ -121,7 +121,7 @@ func TestWriteMigration_WithAudit(t *testing.T) {
    }

    // Verify audit function
    if !strings.Contains(output, "CREATE OR REPLACE FUNCTION public.ft_audit_users()") {
    if !strings.Contains(output, "CREATE OR REPLACE FUNCTION public.tf_audit_users()") {
        t.Error("Migration missing audit function")
    }

@@ -177,7 +177,7 @@ func TestTemplateExecutor_AuditFunction(t *testing.T) {

    data := AuditFunctionData{
        SchemaName:   "public",
        FunctionName: "ft_audit_users",
        FunctionName: "tf_audit_users",
        TableName:    "users",
        TablePrefix:  "NULL",
        PrimaryKey:   "id",
@@ -202,7 +202,7 @@ func TestTemplateExecutor_AuditFunction(t *testing.T) {

    t.Logf("Generated SQL:\n%s", sql)

    if !strings.Contains(sql, "CREATE OR REPLACE FUNCTION public.ft_audit_users()") {
    if !strings.Contains(sql, "CREATE OR REPLACE FUNCTION public.tf_audit_users()") {
        t.Error("SQL missing function definition")
    }
    if !strings.Contains(sql, "IF TG_OP = 'INSERT'") {

@@ -355,7 +355,7 @@ func BuildAuditFunctionData(
    auditSchema string,
    userFunction string,
) AuditFunctionData {
    funcName := fmt.Sprintf("ft_audit_%s", table.Name)
    funcName := fmt.Sprintf("tf_audit_%s", table.Name)

    // Build list of audited columns
    auditedColumns := make([]*models.Column, 0)
File diff suppressed because it is too large
@@ -45,11 +45,11 @@ func TestWriteDatabase(t *testing.T) {

    // Add unique index
    uniqueEmailIndex := &models.Index{
        Name:    "uk_users_email",
        Name:    "uidx_users_email",
        Unique:  true,
        Columns: []string{"email"},
    }
    table.Indexes["uk_users_email"] = uniqueEmailIndex
    table.Indexes["uidx_users_email"] = uniqueEmailIndex

    schema.Tables = append(schema.Tables, table)
    db.Schemas = append(db.Schemas, schema)
@@ -164,6 +164,296 @@ func TestWriteForeignKeys(t *testing.T) {
    }
}

func TestWriteUniqueConstraints(t *testing.T) {
    // Create a test database with unique constraints
    db := models.InitDatabase("testdb")
    schema := models.InitSchema("public")

    // Create table with unique constraints
    table := models.InitTable("users", "public")

    // Add columns
    emailCol := models.InitColumn("email", "users", "public")
    emailCol.Type = "varchar(255)"
    emailCol.NotNull = true
    table.Columns["email"] = emailCol

    guidCol := models.InitColumn("guid", "users", "public")
    guidCol.Type = "uuid"
    guidCol.NotNull = true
    table.Columns["guid"] = guidCol

    // Add unique constraints
    emailConstraint := &models.Constraint{
        Name:    "uq_email",
        Type:    models.UniqueConstraint,
        Schema:  "public",
        Table:   "users",
        Columns: []string{"email"},
    }
    table.Constraints["uq_email"] = emailConstraint

    guidConstraint := &models.Constraint{
        Name:    "uq_guid",
        Type:    models.UniqueConstraint,
        Schema:  "public",
        Table:   "users",
        Columns: []string{"guid"},
    }
    table.Constraints["uq_guid"] = guidConstraint

    schema.Tables = append(schema.Tables, table)
    db.Schemas = append(db.Schemas, schema)

    // Create writer with output to buffer
    var buf bytes.Buffer
    options := &writers.WriterOptions{}
    writer := NewWriter(options)
    writer.writer = &buf

    // Write the database
    err := writer.WriteDatabase(db)
    if err != nil {
        t.Fatalf("WriteDatabase failed: %v", err)
    }

    output := buf.String()

    // Print output for debugging
    t.Logf("Generated SQL:\n%s", output)

    // Verify unique constraints are present
    if !strings.Contains(output, "-- Unique constraints for schema: public") {
        t.Errorf("Output missing unique constraints header")
    }
    if !strings.Contains(output, "ADD CONSTRAINT uq_email UNIQUE (email)") {
        t.Errorf("Output missing uq_email unique constraint\nFull output:\n%s", output)
    }
    if !strings.Contains(output, "ADD CONSTRAINT uq_guid UNIQUE (guid)") {
        t.Errorf("Output missing uq_guid unique constraint\nFull output:\n%s", output)
    }
}

func TestWriteCheckConstraints(t *testing.T) {
    // Create a test database with check constraints
    db := models.InitDatabase("testdb")
    schema := models.InitSchema("public")

    // Create table with check constraints
    table := models.InitTable("products", "public")

    // Add columns
    priceCol := models.InitColumn("price", "products", "public")
    priceCol.Type = "numeric(10,2)"
    table.Columns["price"] = priceCol

    statusCol := models.InitColumn("status", "products", "public")
    statusCol.Type = "varchar(20)"
    table.Columns["status"] = statusCol

    quantityCol := models.InitColumn("quantity", "products", "public")
    quantityCol.Type = "integer"
    table.Columns["quantity"] = quantityCol

    // Add check constraints
    priceConstraint := &models.Constraint{
        Name:       "ck_price_positive",
        Type:       models.CheckConstraint,
        Schema:     "public",
        Table:      "products",
        Expression: "price >= 0",
    }
    table.Constraints["ck_price_positive"] = priceConstraint

    statusConstraint := &models.Constraint{
        Name:       "ck_status_valid",
        Type:       models.CheckConstraint,
        Schema:     "public",
        Table:      "products",
        Expression: "status IN ('active', 'inactive', 'discontinued')",
    }
    table.Constraints["ck_status_valid"] = statusConstraint

    quantityConstraint := &models.Constraint{
        Name:       "ck_quantity_nonnegative",
        Type:       models.CheckConstraint,
        Schema:     "public",
        Table:      "products",
        Expression: "quantity >= 0",
    }
    table.Constraints["ck_quantity_nonnegative"] = quantityConstraint

    schema.Tables = append(schema.Tables, table)
    db.Schemas = append(db.Schemas, schema)

    // Create writer with output to buffer
    var buf bytes.Buffer
    options := &writers.WriterOptions{}
    writer := NewWriter(options)
    writer.writer = &buf

    // Write the database
    err := writer.WriteDatabase(db)
    if err != nil {
        t.Fatalf("WriteDatabase failed: %v", err)
    }

    output := buf.String()

    // Print output for debugging
    t.Logf("Generated SQL:\n%s", output)

    // Verify check constraints are present
    if !strings.Contains(output, "-- Check constraints for schema: public") {
        t.Errorf("Output missing check constraints header")
    }
    if !strings.Contains(output, "ADD CONSTRAINT ck_price_positive CHECK (price >= 0)") {
        t.Errorf("Output missing ck_price_positive check constraint\nFull output:\n%s", output)
    }
    if !strings.Contains(output, "ADD CONSTRAINT ck_status_valid CHECK (status IN ('active', 'inactive', 'discontinued'))") {
        t.Errorf("Output missing ck_status_valid check constraint\nFull output:\n%s", output)
    }
    if !strings.Contains(output, "ADD CONSTRAINT ck_quantity_nonnegative CHECK (quantity >= 0)") {
        t.Errorf("Output missing ck_quantity_nonnegative check constraint\nFull output:\n%s", output)
    }
}

func TestWriteAllConstraintTypes(t *testing.T) {
    // Create a comprehensive test with all constraint types
    db := models.InitDatabase("testdb")
    schema := models.InitSchema("public")

    // Create orders table
    ordersTable := models.InitTable("orders", "public")

    // Add columns
    idCol := models.InitColumn("id", "orders", "public")
    idCol.Type = "integer"
    idCol.IsPrimaryKey = true
    ordersTable.Columns["id"] = idCol

    userIDCol := models.InitColumn("user_id", "orders", "public")
    userIDCol.Type = "integer"
    userIDCol.NotNull = true
    ordersTable.Columns["user_id"] = userIDCol

    orderNumberCol := models.InitColumn("order_number", "orders", "public")
    orderNumberCol.Type = "varchar(50)"
    orderNumberCol.NotNull = true
    ordersTable.Columns["order_number"] = orderNumberCol

    totalCol := models.InitColumn("total", "orders", "public")
    totalCol.Type = "numeric(10,2)"
    ordersTable.Columns["total"] = totalCol

    statusCol := models.InitColumn("status", "orders", "public")
    statusCol.Type = "varchar(20)"
    ordersTable.Columns["status"] = statusCol

    // Add primary key constraint
    pkConstraint := &models.Constraint{
        Name:    "pk_orders",
        Type:    models.PrimaryKeyConstraint,
        Schema:  "public",
        Table:   "orders",
        Columns: []string{"id"},
    }
    ordersTable.Constraints["pk_orders"] = pkConstraint

    // Add unique constraint
    uniqueConstraint := &models.Constraint{
        Name:    "uq_order_number",
        Type:    models.UniqueConstraint,
        Schema:  "public",
        Table:   "orders",
        Columns: []string{"order_number"},
    }
    ordersTable.Constraints["uq_order_number"] = uniqueConstraint

    // Add check constraint
    checkConstraint := &models.Constraint{
        Name:       "ck_total_positive",
        Type:       models.CheckConstraint,
        Schema:     "public",
        Table:      "orders",
        Expression: "total > 0",
    }
    ordersTable.Constraints["ck_total_positive"] = checkConstraint

    statusCheckConstraint := &models.Constraint{
        Name:       "ck_status_valid",
        Type:       models.CheckConstraint,
        Schema:     "public",
        Table:      "orders",
        Expression: "status IN ('pending', 'completed', 'cancelled')",
    }
    ordersTable.Constraints["ck_status_valid"] = statusCheckConstraint

    // Add foreign key constraint (referencing a users table)
    fkConstraint := &models.Constraint{
        Name:              "fk_orders_user",
        Type:              models.ForeignKeyConstraint,
        Schema:            "public",
        Table:             "orders",
        Columns:           []string{"user_id"},
        ReferencedSchema:  "public",
        ReferencedTable:   "users",
        ReferencedColumns: []string{"id"},
        OnDelete:          "CASCADE",
        OnUpdate:          "CASCADE",
    }
    ordersTable.Constraints["fk_orders_user"] = fkConstraint

    schema.Tables = append(schema.Tables, ordersTable)
    db.Schemas = append(db.Schemas, schema)

    // Create writer with output to buffer
    var buf bytes.Buffer
    options := &writers.WriterOptions{}
    writer := NewWriter(options)
    writer.writer = &buf

    // Write the database
    err := writer.WriteDatabase(db)
    if err != nil {
        t.Fatalf("WriteDatabase failed: %v", err)
    }

    output := buf.String()

    // Print output for debugging
    t.Logf("Generated SQL:\n%s", output)

    // Verify all constraint types are present
    expectedConstraints := map[string]string{
        "Primary Key":    "PRIMARY KEY",
        "Unique":         "ADD CONSTRAINT uq_order_number UNIQUE (order_number)",
        "Check (total)":  "ADD CONSTRAINT ck_total_positive CHECK (total > 0)",
        "Check (status)": "ADD CONSTRAINT ck_status_valid CHECK (status IN ('pending', 'completed', 'cancelled'))",
        "Foreign Key":    "FOREIGN KEY",
    }

    for name, expected := range expectedConstraints {
        if !strings.Contains(output, expected) {
            t.Errorf("Output missing %s constraint: %s\nFull output:\n%s", name, expected, output)
        }
    }

    // Verify section headers
    sections := []string{
        "-- Primary keys for schema: public",
        "-- Unique constraints for schema: public",
        "-- Check constraints for schema: public",
        "-- Foreign keys for schema: public",
    }

    for _, section := range sections {
        if !strings.Contains(output, section) {
            t.Errorf("Output missing section header: %s", section)
        }
    }
}

func TestWriteTable(t *testing.T) {
    // Create a single table
    table := models.InitTable("products", "public")
@@ -241,3 +531,327 @@ func TestIsIntegerType(t *testing.T) {
        }
    }
}

func TestTypeConversion(t *testing.T) {
    // Test that invalid Go types are converted to valid PostgreSQL types
    db := models.InitDatabase("testdb")
    schema := models.InitSchema("public")

    // Create a test table with Go types instead of SQL types
    table := models.InitTable("test_types", "public")

    // Add columns with Go types (invalid for PostgreSQL)
    stringCol := models.InitColumn("name", "test_types", "public")
    stringCol.Type = "string" // Should be converted to "text"
    table.Columns["name"] = stringCol

    int64Col := models.InitColumn("big_id", "test_types", "public")
    int64Col.Type = "int64" // Should be converted to "bigint"
    table.Columns["big_id"] = int64Col

    int16Col := models.InitColumn("small_id", "test_types", "public")
    int16Col.Type = "int16" // Should be converted to "smallint"
    table.Columns["small_id"] = int16Col

    schema.Tables = append(schema.Tables, table)
    db.Schemas = append(db.Schemas, schema)

    // Create writer with output to buffer
    var buf bytes.Buffer
    options := &writers.WriterOptions{}
    writer := NewWriter(options)
    writer.writer = &buf

    // Write the database
    err := writer.WriteDatabase(db)
    if err != nil {
        t.Fatalf("WriteDatabase failed: %v", err)
    }

    output := buf.String()

    // Print output for debugging
    t.Logf("Generated SQL:\n%s", output)

    // Verify that Go types were converted to PostgreSQL types
    if strings.Contains(output, "string") {
        t.Errorf("Output contains 'string' type - should be converted to 'text'\nFull output:\n%s", output)
    }
    if strings.Contains(output, "int64") {
        t.Errorf("Output contains 'int64' type - should be converted to 'bigint'\nFull output:\n%s", output)
    }
    if strings.Contains(output, "int16") {
        t.Errorf("Output contains 'int16' type - should be converted to 'smallint'\nFull output:\n%s", output)
    }

    // Verify correct PostgreSQL types are present
    if !strings.Contains(output, "text") {
        t.Errorf("Output missing 'text' type (converted from 'string')\nFull output:\n%s", output)
    }
    if !strings.Contains(output, "bigint") {
        t.Errorf("Output missing 'bigint' type (converted from 'int64')\nFull output:\n%s", output)
    }
    if !strings.Contains(output, "smallint") {
        t.Errorf("Output missing 'smallint' type (converted from 'int16')\nFull output:\n%s", output)
    }
}
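The mapping this test pins down (and that `pgsql.ConvertSQLType` is applied for in the migration-writer hunks above) can be sketched as a small switch. The `string`/`int64`/`int16` cases come from the test; the remaining cases are assumptions:

```go
// convertSQLType is an illustrative sketch, not the pgsql package's code.
func convertSQLType(goType string) string {
    switch strings.ToLower(goType) {
    case "string":
        return "text"
    case "int16":
        return "smallint"
    case "int", "int32": // assumed
        return "integer"
    case "int64":
        return "bigint"
    default:
        return goType // already a SQL type; pass through
    }
}
```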
func TestPrimaryKeyExistenceCheck(t *testing.T) {
    db := models.InitDatabase("testdb")
    schema := models.InitSchema("public")
    table := models.InitTable("products", "public")

    idCol := models.InitColumn("id", "products", "public")
    idCol.Type = "integer"
    idCol.IsPrimaryKey = true
    table.Columns["id"] = idCol

    nameCol := models.InitColumn("name", "products", "public")
    nameCol.Type = "text"
    table.Columns["name"] = nameCol

    schema.Tables = append(schema.Tables, table)
    db.Schemas = append(db.Schemas, schema)

    var buf bytes.Buffer
    options := &writers.WriterOptions{}
    writer := NewWriter(options)
    writer.writer = &buf

    err := writer.WriteDatabase(db)
    if err != nil {
        t.Fatalf("WriteDatabase failed: %v", err)
    }

    output := buf.String()
    t.Logf("Generated SQL:\n%s", output)

    // Verify our naming convention is used
    if !strings.Contains(output, "pk_public_products") {
        t.Errorf("Output missing expected primary key name 'pk_public_products'\nFull output:\n%s", output)
    }

    // Verify it drops auto-generated primary keys
    if !strings.Contains(output, "products_pkey") || !strings.Contains(output, "DROP CONSTRAINT") {
        t.Errorf("Output missing logic to drop auto-generated primary key\nFull output:\n%s", output)
    }

    // Verify it checks for our specific named constraint before adding it
    if !strings.Contains(output, "constraint_name = 'pk_public_products'") {
        t.Errorf("Output missing check for our named primary key constraint\nFull output:\n%s", output)
    }
}
func TestColumnSizeSpecifiers(t *testing.T) {
    db := models.InitDatabase("testdb")
    schema := models.InitSchema("public")
    table := models.InitTable("test_sizes", "public")

    // Integer with invalid size specifier - should ignore size
    integerCol := models.InitColumn("int_col", "test_sizes", "public")
    integerCol.Type = "integer"
    integerCol.Length = 32
    table.Columns["int_col"] = integerCol

    // Bigint with invalid size specifier - should ignore size
    bigintCol := models.InitColumn("bigint_col", "test_sizes", "public")
    bigintCol.Type = "bigint"
    bigintCol.Length = 64
    table.Columns["bigint_col"] = bigintCol

    // Smallint with invalid size specifier - should ignore size
    smallintCol := models.InitColumn("smallint_col", "test_sizes", "public")
    smallintCol.Type = "smallint"
    smallintCol.Length = 16
    table.Columns["smallint_col"] = smallintCol

    // Text with length - should convert to varchar
    textCol := models.InitColumn("text_col", "test_sizes", "public")
    textCol.Type = "text"
    textCol.Length = 100
    table.Columns["text_col"] = textCol

    // Varchar with length - should keep varchar with length
    varcharCol := models.InitColumn("varchar_col", "test_sizes", "public")
    varcharCol.Type = "varchar"
    varcharCol.Length = 50
    table.Columns["varchar_col"] = varcharCol

    // Decimal with precision and scale - should keep them
    decimalCol := models.InitColumn("decimal_col", "test_sizes", "public")
    decimalCol.Type = "decimal"
    decimalCol.Precision = 19
    decimalCol.Scale = 4
    table.Columns["decimal_col"] = decimalCol

    schema.Tables = append(schema.Tables, table)
    db.Schemas = append(db.Schemas, schema)

    var buf bytes.Buffer
    options := &writers.WriterOptions{}
    writer := NewWriter(options)
    writer.writer = &buf

    err := writer.WriteDatabase(db)
    if err != nil {
        t.Fatalf("WriteDatabase failed: %v", err)
    }

    output := buf.String()
    t.Logf("Generated SQL:\n%s", output)

    // Verify invalid size specifiers are NOT present
    invalidPatterns := []string{
        "integer(32)",
        "bigint(64)",
        "smallint(16)",
        "text(100)",
    }
    for _, pattern := range invalidPatterns {
        if strings.Contains(output, pattern) {
            t.Errorf("Output contains invalid pattern '%s' - PostgreSQL doesn't support this\nFull output:\n%s", pattern, output)
        }
    }

    // Verify valid patterns ARE present
    validPatterns := []string{
        "integer",       // without size
        "bigint",        // without size
        "smallint",      // without size
        "varchar(100)",  // text converted to varchar with length
        "varchar(50)",   // varchar with length
        "decimal(19,4)", // decimal with precision and scale
    }
    for _, pattern := range validPatterns {
        if !strings.Contains(output, pattern) {
            t.Errorf("Output missing expected pattern '%s'\nFull output:\n%s", pattern, output)
        }
    }
}
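The size-specifier rules exercised above can likewise be sketched as a formatter: integer types never take a length, sized `text` becomes `varchar(n)`, and `decimal` keeps precision and scale. A hypothetical `formatColumnType`, not RelSpec's actual function:

```go
func formatColumnType(typ string, length, precision, scale int) string {
    switch strings.ToLower(typ) {
    case "smallint", "integer", "bigint":
        return typ // PostgreSQL integer types take no size specifier
    case "text":
        if length > 0 {
            return fmt.Sprintf("varchar(%d)", length) // sized text becomes varchar
        }
        return "text"
    case "varchar":
        if length > 0 {
            return fmt.Sprintf("varchar(%d)", length)
        }
        return "varchar"
    case "decimal", "numeric":
        if precision > 0 {
            return fmt.Sprintf("%s(%d,%d)", typ, precision, scale)
        }
        return typ
    default:
        return typ
    }
}
```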
func TestGenerateAddColumnStatements(t *testing.T) {
    // Create a test database with tables that have new columns
    db := models.InitDatabase("testdb")
    schema := models.InitSchema("public")

    // Create a table with columns
    table := models.InitTable("users", "public")

    // Existing column
    idCol := models.InitColumn("id", "users", "public")
    idCol.Type = "integer"
    idCol.NotNull = true
    idCol.Sequence = 1
    table.Columns["id"] = idCol

    // New column to be added
    emailCol := models.InitColumn("email", "users", "public")
    emailCol.Type = "varchar"
    emailCol.Length = 255
    emailCol.NotNull = true
    emailCol.Sequence = 2
    table.Columns["email"] = emailCol

    // New column with default
    statusCol := models.InitColumn("status", "users", "public")
    statusCol.Type = "text"
    statusCol.Default = "active"
    statusCol.Sequence = 3
    table.Columns["status"] = statusCol

    schema.Tables = append(schema.Tables, table)
    db.Schemas = append(db.Schemas, schema)

    // Create writer
    options := &writers.WriterOptions{}
    writer := NewWriter(options)

    // Generate ADD COLUMN statements
    statements, err := writer.GenerateAddColumnsForDatabase(db)
    if err != nil {
        t.Fatalf("GenerateAddColumnsForDatabase failed: %v", err)
    }

    // Join all statements to verify content
    output := strings.Join(statements, "\n")
    t.Logf("Generated ADD COLUMN statements:\n%s", output)

    // Verify expected elements
    expectedStrings := []string{
        "ALTER TABLE public.users ADD COLUMN id integer NOT NULL",
        "ALTER TABLE public.users ADD COLUMN email varchar(255) NOT NULL",
        "ALTER TABLE public.users ADD COLUMN status text DEFAULT 'active'",
        "information_schema.columns",
        "table_schema = 'public'",
        "table_name = 'users'",
        "column_name = 'id'",
        "column_name = 'email'",
        "column_name = 'status'",
    }

    for _, expected := range expectedStrings {
        if !strings.Contains(output, expected) {
            t.Errorf("Output missing expected string: %s\nFull output:\n%s", expected, output)
        }
    }

    // Verify DO blocks are present for conditional adds
    doBlockCount := strings.Count(output, "DO $$")
    if doBlockCount < 3 {
        t.Errorf("Expected at least 3 DO blocks (one per column), got %d", doBlockCount)
    }

    // Verify IF NOT EXISTS logic
    ifNotExistsCount := strings.Count(output, "IF NOT EXISTS")
    if ifNotExistsCount < 3 {
        t.Errorf("Expected at least 3 IF NOT EXISTS checks (one per column), got %d", ifNotExistsCount)
    }
}
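Putting the asserted fragments together, each generated statement presumably has roughly this shape — a guarded ADD COLUMN inside a DO block. The SQL below is reconstructed from the strings the test checks for, not copied from the writer:

```go
// Illustrative reconstruction of one generated statement.
const addEmailColumn = `
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_schema = 'public'
          AND table_name = 'users'
          AND column_name = 'email'
    ) THEN
        ALTER TABLE public.users ADD COLUMN email varchar(255) NOT NULL;
    END IF;
END $$;
`
```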
func TestWriteAddColumnStatements(t *testing.T) {
    // Create a test database
    db := models.InitDatabase("testdb")
    schema := models.InitSchema("public")

    // Create a table with a new column to be added
    table := models.InitTable("products", "public")

    idCol := models.InitColumn("id", "products", "public")
    idCol.Type = "integer"
    table.Columns["id"] = idCol

    // New column with various properties
    descCol := models.InitColumn("description", "products", "public")
    descCol.Type = "text"
    descCol.NotNull = false
    table.Columns["description"] = descCol

    schema.Tables = append(schema.Tables, table)
    db.Schemas = append(db.Schemas, schema)

    // Create writer with output to buffer
    var buf bytes.Buffer
    options := &writers.WriterOptions{}
    writer := NewWriter(options)
    writer.writer = &buf

    // Write ADD COLUMN statements
    err := writer.WriteAddColumnStatements(db)
    if err != nil {
        t.Fatalf("WriteAddColumnStatements failed: %v", err)
    }

    output := buf.String()
    t.Logf("Generated output:\n%s", output)

    // Verify output contains expected elements
    if !strings.Contains(output, "ALTER TABLE public.products ADD COLUMN id integer") {
        t.Errorf("Output missing ADD COLUMN for id\nFull output:\n%s", output)
    }
    if !strings.Contains(output, "ALTER TABLE public.products ADD COLUMN description text") {
        t.Errorf("Output missing ADD COLUMN for description\nFull output:\n%s", output)
    }
    if !strings.Contains(output, "DO $$") {
        t.Errorf("Output missing DO block\nFull output:\n%s", output)
    }
}
@@ -4,7 +4,7 @@ The SQL Executor Writer (`sqlexec`) executes SQL scripts from `models.Script` ob

## Features

- **Ordered Execution**: Scripts execute in Priority→Sequence order
- **Ordered Execution**: Scripts execute in Priority→Sequence→Name order
- **PostgreSQL Support**: Uses `pgx/v5` driver for robust PostgreSQL connectivity
- **Stop on Error**: Execution halts immediately on first error (default behavior)
- **Progress Reporting**: Prints execution status to stdout
@@ -103,19 +103,40 @@ Scripts are sorted and executed based on:

1. **Priority** (ascending): Lower priority values execute first
2. **Sequence** (ascending): Within same priority, lower sequence values execute first
3. **Name** (ascending): Within same priority and sequence, alphabetical order by name

### Example Execution Order

Given these scripts:
```
Script A: Priority=2, Sequence=1
Script B: Priority=1, Sequence=3
Script C: Priority=1, Sequence=1
Script D: Priority=1, Sequence=2
Script E: Priority=3, Sequence=1
Script A: Priority=2, Sequence=1, Name="zebra"
Script B: Priority=1, Sequence=3, Name="script"
Script C: Priority=1, Sequence=1, Name="apple"
Script D: Priority=1, Sequence=1, Name="beta"
Script E: Priority=3, Sequence=1, Name="script"
```

Execution order: **C → D → B → A → E**
Execution order: **C (apple) → D (beta) → B → A → E**

### Directory-based Sorting Example

Given these files:
```
1_001_create_schema.sql
1_001_create_users.sql   ← Alphabetically before "drop_tables"
1_001_drop_tables.sql
1_002_add_indexes.sql
2_001_constraints.sql
```

Execution order (note alphabetical sorting at same priority/sequence):
```
1_001_create_schema.sql
1_001_create_users.sql
1_001_drop_tables.sql
1_002_add_indexes.sql
2_001_constraints.sql
```
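For reference, the three-key ordering above reads directly as a `sort.Slice` comparator; this mirrors the sort used by the writer (see the `executeScripts` diff further down):

```go
sort.Slice(scripts, func(i, j int) bool {
    if scripts[i].Priority != scripts[j].Priority {
        return scripts[i].Priority < scripts[j].Priority // 1. Priority ascending
    }
    if scripts[i].Sequence != scripts[j].Sequence {
        return scripts[i].Sequence < scripts[j].Sequence // 2. Sequence ascending
    }
    return scripts[i].Name < scripts[j].Name // 3. Name ascending
})
```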
## Output
@@ -23,6 +23,11 @@ func NewWriter(options *writers.WriterOptions) *Writer {
    }
}

// Options returns the writer options (useful for reading execution results)
func (w *Writer) Options() *writers.WriterOptions {
    return w.options
}

// WriteDatabase executes all scripts from all schemas in the database
func (w *Writer) WriteDatabase(db *models.Database) error {
    if db == nil {
@@ -86,20 +91,39 @@ func (w *Writer) WriteTable(table *models.Table) error {
    return fmt.Errorf("WriteTable is not supported for SQL script execution")
}

// executeScripts executes scripts in Priority then Sequence order
// executeScripts executes scripts in Priority, Sequence, then Name order
func (w *Writer) executeScripts(ctx context.Context, conn *pgx.Conn, scripts []*models.Script) error {
    if len(scripts) == 0 {
        return nil
    }

    // Sort scripts by Priority (ascending) then Sequence (ascending)
    // Check if we should ignore errors
    ignoreErrors := false
    if val, ok := w.options.Metadata["ignore_errors"].(bool); ok {
        ignoreErrors = val
    }

    // Track failed scripts and execution counts
    var failedScripts []struct {
        name     string
        priority int
        sequence uint
        err      error
    }
    successCount := 0
    totalCount := 0

    // Sort scripts by Priority (ascending), Sequence (ascending), then Name (ascending)
    sortedScripts := make([]*models.Script, len(scripts))
    copy(sortedScripts, scripts)
    sort.Slice(sortedScripts, func(i, j int) bool {
        if sortedScripts[i].Priority != sortedScripts[j].Priority {
            return sortedScripts[i].Priority < sortedScripts[j].Priority
        }
        return sortedScripts[i].Sequence < sortedScripts[j].Sequence
        if sortedScripts[i].Sequence != sortedScripts[j].Sequence {
            return sortedScripts[i].Sequence < sortedScripts[j].Sequence
        }
        return sortedScripts[i].Name < sortedScripts[j].Name
    })

    // Execute each script in order
@@ -108,18 +132,49 @@ func (w *Writer) executeScripts(ctx context.Context, conn *pgx.Conn, scripts []*
            continue
        }

        totalCount++
        fmt.Printf("Executing script: %s (Priority=%d, Sequence=%d)\n",
            script.Name, script.Priority, script.Sequence)

        // Execute the SQL script
        _, err := conn.Exec(ctx, script.SQL)
        if err != nil {
            return fmt.Errorf("failed to execute script %s (Priority=%d, Sequence=%d): %w",
            if ignoreErrors {
                fmt.Printf("⚠ Error executing %s: %v (continuing due to --ignore-errors)\n", script.Name, err)
                failedScripts = append(failedScripts, struct {
                    name     string
                    priority int
                    sequence uint
                    err      error
                }{
                    name:     script.Name,
                    priority: script.Priority,
                    sequence: script.Sequence,
                    err:      err,
                })
                continue
            }
            return fmt.Errorf("script %s (Priority=%d, Sequence=%d): %w",
                script.Name, script.Priority, script.Sequence, err)
        }

        successCount++
        fmt.Printf("✓ Successfully executed: %s\n", script.Name)
    }

    // Store execution results in metadata for caller
    w.options.Metadata["execution_total"] = totalCount
    w.options.Metadata["execution_success"] = successCount
    w.options.Metadata["execution_failed"] = len(failedScripts)

    // Print summary of failed scripts if any
    if len(failedScripts) > 0 {
        fmt.Printf("\n⚠ Failed Scripts Summary (%d failed):\n", len(failedScripts))
        for i, failed := range failedScripts {
            fmt.Printf("  %d. %s (Priority=%d, Sequence=%d)\n     Error: %v\n",
                i+1, failed.name, failed.priority, failed.sequence, failed.err)
        }
    }

    return nil
}
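Because the counts are written back into the options map, a caller can run with errors ignored and inspect the results afterwards via `Options()`. A minimal usage sketch (the surrounding setup — `db`, imports — is assumed):

```go
opts := &writers.WriterOptions{
    Metadata: map[string]interface{}{
        "ignore_errors": true, // continue past failing scripts
    },
}
w := sqlexec.NewWriter(opts)

if err := w.WriteDatabase(db); err != nil {
    log.Fatal(err)
}

// Read the execution counters the writer stored in the metadata map.
md := w.Options().Metadata
fmt.Printf("executed %v scripts: %v succeeded, %v failed\n",
    md["execution_total"], md["execution_success"], md["execution_failed"])
```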
@@ -99,13 +99,13 @@ func TestWriter_WriteTable(t *testing.T) {
    }
}

// TestScriptSorting verifies that scripts are sorted correctly by Priority then Sequence
// TestScriptSorting verifies that scripts are sorted correctly by Priority, Sequence, then Name
func TestScriptSorting(t *testing.T) {
    scripts := []*models.Script{
        {Name: "script1", Priority: 2, Sequence: 1, SQL: "SELECT 1;"},
        {Name: "z_script1", Priority: 2, Sequence: 1, SQL: "SELECT 1;"},
        {Name: "script2", Priority: 1, Sequence: 3, SQL: "SELECT 2;"},
        {Name: "script3", Priority: 1, Sequence: 1, SQL: "SELECT 3;"},
        {Name: "script4", Priority: 1, Sequence: 2, SQL: "SELECT 4;"},
        {Name: "a_script3", Priority: 1, Sequence: 1, SQL: "SELECT 3;"},
        {Name: "b_script4", Priority: 1, Sequence: 1, SQL: "SELECT 4;"},
        {Name: "script5", Priority: 3, Sequence: 1, SQL: "SELECT 5;"},
        {Name: "script6", Priority: 2, Sequence: 2, SQL: "SELECT 6;"},
    }
@@ -114,25 +114,35 @@ func TestScriptSorting(t *testing.T) {
    sortedScripts := make([]*models.Script, len(scripts))
    copy(sortedScripts, scripts)

    // Use the same sorting logic from executeScripts
    // Sort by Priority, Sequence, then Name (matching executeScripts logic)
    for i := 0; i < len(sortedScripts)-1; i++ {
        for j := i + 1; j < len(sortedScripts); j++ {
            if sortedScripts[i].Priority > sortedScripts[j].Priority ||
                (sortedScripts[i].Priority == sortedScripts[j].Priority &&
                    sortedScripts[i].Sequence > sortedScripts[j].Sequence) {
            si, sj := sortedScripts[i], sortedScripts[j]
            // Compare by priority first
            if si.Priority > sj.Priority {
                sortedScripts[i], sortedScripts[j] = sortedScripts[j], sortedScripts[i]
            } else if si.Priority == sj.Priority {
                // If same priority, compare by sequence
                if si.Sequence > sj.Sequence {
                    sortedScripts[i], sortedScripts[j] = sortedScripts[j], sortedScripts[i]
                } else if si.Sequence == sj.Sequence {
                    // If same sequence, compare by name
                    if si.Name > sj.Name {
                        sortedScripts[i], sortedScripts[j] = sortedScripts[j], sortedScripts[i]
                    }
                }
            }
        }
    }

    // Expected order after sorting
    // Expected order after sorting (Priority -> Sequence -> Name)
    expectedOrder := []string{
        "script3",   // Priority 1, Sequence 1
        "script4",   // Priority 1, Sequence 2
        "script2",   // Priority 1, Sequence 3
        "script1",   // Priority 2, Sequence 1
        "script6",   // Priority 2, Sequence 2
        "script5",   // Priority 3, Sequence 1
        "a_script3", // Priority 1, Sequence 1, Name a_script3
        "b_script4", // Priority 1, Sequence 1, Name b_script4
        "script2",   // Priority 1, Sequence 3
        "z_script1", // Priority 2, Sequence 1
        "script6",   // Priority 2, Sequence 2
        "script5",   // Priority 3, Sequence 1
    }

    for i, expected := range expectedOrder {
@@ -153,6 +163,13 @@ func TestScriptSorting(t *testing.T) {
            t.Errorf("Sequence not ascending at position %d with same priority %d: %d > %d",
                i, sortedScripts[i].Priority, sortedScripts[i].Sequence, sortedScripts[i+1].Sequence)
        }
        // Within same priority and sequence, names should be ascending
        if sortedScripts[i].Priority == sortedScripts[i+1].Priority &&
            sortedScripts[i].Sequence == sortedScripts[i+1].Sequence &&
            sortedScripts[i].Name > sortedScripts[i+1].Name {
            t.Errorf("Name not ascending at position %d with same priority/sequence: %s > %s",
                i, sortedScripts[i].Name, sortedScripts[i+1].Name)
        }
    }
}
@@ -1,6 +1,9 @@
package writers

import (
    "regexp"
    "strings"

    "git.warky.dev/wdevs/relspecgo/pkg/models"
)

@@ -28,3 +31,56 @@ type WriterOptions struct {
    // Additional options can be added here as needed
    Metadata map[string]interface{}
}

// SanitizeFilename removes quotes, comments, and invalid characters from identifiers
// to make them safe for use in filenames. This handles:
//   - Double and single quotes: "table_name" or 'table_name' -> table_name
//   - DBML comments: table [note: 'description'] -> table
//   - Invalid filename characters: replaced with underscores
func SanitizeFilename(name string) string {
    // Remove DBML/DCTX style comments in brackets (e.g., [note: 'description'])
    commentRegex := regexp.MustCompile(`\s*\[.*?\]\s*`)
    name = commentRegex.ReplaceAllString(name, "")

    // Remove quotes (both single and double)
    name = strings.ReplaceAll(name, `"`, "")
    name = strings.ReplaceAll(name, `'`, "")

    // Remove backticks (MySQL style identifiers)
    name = strings.ReplaceAll(name, "`", "")

    // Replace invalid filename characters with underscores
    // Invalid chars: / \ : * ? " < > | and control characters
    invalidChars := regexp.MustCompile(`[/\\:*?"<>|\x00-\x1f\x7f]`)
    name = invalidChars.ReplaceAllString(name, "_")

    // Trim whitespace and consecutive underscores
    name = strings.TrimSpace(name)
    name = regexp.MustCompile(`_+`).ReplaceAllString(name, "_")
    name = strings.Trim(name, "_")

    return name
}

// SanitizeStructTagValue sanitizes a value to be safely used inside Go struct tags.
// Go struct tags are delimited by backticks, so any backtick in the value would break the syntax.
// This function:
//   - Removes DBML/DCTX comments in brackets
//   - Removes all quotes (double, single, and backticks)
//   - Returns a clean identifier safe for use in struct tags and field names
func SanitizeStructTagValue(value string) string {
    // Remove DBML/DCTX style comments in brackets (e.g., [note: 'description'])
    commentRegex := regexp.MustCompile(`\s*\[.*?\]\s*`)
    value = commentRegex.ReplaceAllString(value, "")

    // Trim whitespace
    value = strings.TrimSpace(value)

    // Remove all quotes: backticks, double quotes, and single quotes
    // This ensures the value is clean for use as Go identifiers and struct tag values
    value = strings.ReplaceAll(value, "`", "")
    value = strings.ReplaceAll(value, `"`, "")
    value = strings.ReplaceAll(value, `'`, "")

    return value
}
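A few input/output pairs implied by the documented rules (expected values derived from the regexes above, not taken from the package's tests):

```go
writers.SanitizeFilename(`"users" [note: 'app users']`)    // -> "users"
writers.SanitizeFilename(`a/b:c*d`)                         // -> "a_b_c_d"
writers.SanitizeStructTagValue("`order_items` [note: 'x']") // -> "order_items"
```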
5
tests/assets/dbml/multifile/1_users.dbml
Normal file
@@ -0,0 +1,5 @@
// First file - users table basic structure
Table public.users {
    id bigint [pk, increment]
    email varchar(255) [unique, not null]
}

8
tests/assets/dbml/multifile/2_posts.dbml
Normal file
@@ -0,0 +1,8 @@
// Second file - posts table
Table public.posts {
    id bigint [pk, increment]
    user_id bigint [not null]
    title varchar(200) [not null]
    content text
    created_at timestamp [not null]
}

5
tests/assets/dbml/multifile/3_add_columns.dbml
Normal file
@@ -0,0 +1,5 @@
// Third file - adds more columns to users table (tests merging)
Table public.users {
    name varchar(100)
    created_at timestamp [not null]
}

10
tests/assets/dbml/multifile/9_refs.dbml
Normal file
@@ -0,0 +1,10 @@
// File with commented-out refs - should load last
// Contains relationships that depend on earlier tables

// Ref: public.posts.user_id > public.users.id [ondelete: CASCADE]

Table public.comments {
    id bigint [pk, increment]
    post_id bigint [not null]
    content text [not null]
}
346
vendor/modules.txt
vendored
346
vendor/modules.txt
vendored
@@ -1,6 +1,92 @@
|
||||
# 4d63.com/gocheckcompilerdirectives v1.3.0
|
||||
## explicit; go 1.22.0
|
||||
# 4d63.com/gochecknoglobals v0.2.2
|
||||
## explicit; go 1.18
|
||||
# github.com/4meepo/tagalign v1.4.2
|
||||
## explicit; go 1.22.0
|
||||
# github.com/Abirdcfly/dupword v0.1.3
|
||||
## explicit; go 1.22.0
|
||||
# github.com/Antonboom/errname v1.0.0
|
||||
## explicit; go 1.22.1
|
||||
# github.com/Antonboom/nilnil v1.0.1
|
||||
## explicit; go 1.22.0
|
||||
# github.com/Antonboom/testifylint v1.5.2
|
||||
## explicit; go 1.22.1
|
||||
# github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c
|
||||
## explicit; go 1.18
|
||||
# github.com/Crocmagnon/fatcontext v0.7.1
|
||||
## explicit; go 1.22.0
|
||||
# github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24
|
||||
## explicit; go 1.13
|
||||
# github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1
|
||||
## explicit; go 1.23.0
|
||||
# github.com/Masterminds/semver/v3 v3.3.0
|
||||
## explicit; go 1.21
|
||||
# github.com/OpenPeeDeeP/depguard/v2 v2.2.1
|
||||
## explicit; go 1.23.0
|
||||
# github.com/alecthomas/go-check-sumtype v0.3.1
|
||||
## explicit; go 1.22.0
|
||||
# github.com/alexkohler/nakedret/v2 v2.0.5
|
||||
## explicit; go 1.21
|
||||
# github.com/alexkohler/prealloc v1.0.0
|
||||
## explicit; go 1.15
|
||||
# github.com/alingse/asasalint v0.0.11
|
||||
## explicit; go 1.18
|
||||
# github.com/alingse/nilnesserr v0.1.2
|
||||
## explicit; go 1.22.0
|
||||
# github.com/ashanbrown/forbidigo v1.6.0
|
||||
## explicit; go 1.13
|
||||
# github.com/ashanbrown/makezero v1.2.0
|
||||
## explicit; go 1.12
|
||||
# github.com/beorn7/perks v1.0.1
|
||||
## explicit; go 1.11
|
||||
# github.com/bkielbasa/cyclop v1.2.3
|
||||
## explicit; go 1.22.0
|
||||
# github.com/blizzy78/varnamelen v0.8.0
|
||||
## explicit; go 1.16
|
||||
# github.com/bombsimon/wsl/v4 v4.5.0
|
||||
## explicit; go 1.22
|
||||
# github.com/breml/bidichk v0.3.2
|
||||
## explicit; go 1.22.0
|
||||
# github.com/breml/errchkjson v0.4.0
|
||||
## explicit; go 1.22.0
|
||||
# github.com/butuzov/ireturn v0.3.1
|
||||
## explicit; go 1.18
|
||||
# github.com/butuzov/mirror v1.3.0
|
||||
## explicit; go 1.19
|
||||
# github.com/catenacyber/perfsprint v0.8.2
|
||||
## explicit; go 1.22.0
|
||||
# github.com/ccojocar/zxcvbn-go v1.0.2
|
||||
## explicit; go 1.20
|
||||
# github.com/cespare/xxhash/v2 v2.3.0
|
||||
## explicit; go 1.11
|
||||
# github.com/charithe/durationcheck v0.0.10
|
||||
## explicit; go 1.14
|
||||
# github.com/chavacava/garif v0.1.0
|
||||
## explicit; go 1.16
|
||||
# github.com/ckaznocha/intrange v0.3.0
|
||||
## explicit; go 1.22
|
||||
# github.com/curioswitch/go-reassign v0.3.0
|
||||
## explicit; go 1.21
|
||||
# github.com/daixiang0/gci v0.13.5
|
||||
## explicit; go 1.21
|
||||
# github.com/davecgh/go-spew v1.1.1
|
||||
## explicit
|
||||
github.com/davecgh/go-spew/spew
|
||||
# github.com/denis-tingaikin/go-header v0.5.0
|
||||
## explicit; go 1.21
|
||||
# github.com/ettle/strcase v0.2.0
|
||||
## explicit; go 1.12
|
||||
# github.com/fatih/color v1.18.0
|
||||
## explicit; go 1.17
|
||||
# github.com/fatih/structtag v1.2.0
|
||||
## explicit; go 1.12
|
||||
# github.com/firefart/nonamedreturns v1.0.5
|
||||
## explicit; go 1.18
|
||||
# github.com/fsnotify/fsnotify v1.5.4
|
||||
## explicit; go 1.16
|
||||
# github.com/fzipp/gocyclo v0.6.0
|
||||
## explicit; go 1.18
|
||||
# github.com/gdamore/encoding v1.0.1
|
||||
## explicit; go 1.9
|
||||
github.com/gdamore/encoding
|
||||
@@ -44,9 +130,75 @@ github.com/gdamore/tcell/v2/terminfo/x/xfce
|
||||
github.com/gdamore/tcell/v2/terminfo/x/xterm
|
||||
github.com/gdamore/tcell/v2/terminfo/x/xterm_ghostty
|
||||
github.com/gdamore/tcell/v2/terminfo/x/xterm_kitty
|
||||
# github.com/ghostiam/protogetter v0.3.9
|
||||
## explicit; go 1.22.0
|
||||
# github.com/go-critic/go-critic v0.12.0
## explicit; go 1.22.0
# github.com/go-toolsmith/astcast v1.1.0
## explicit; go 1.16
# github.com/go-toolsmith/astcopy v1.1.0
## explicit; go 1.16
# github.com/go-toolsmith/astequal v1.2.0
## explicit; go 1.18
# github.com/go-toolsmith/astfmt v1.1.0
## explicit; go 1.16
# github.com/go-toolsmith/astp v1.1.0
## explicit; go 1.16
# github.com/go-toolsmith/strparse v1.1.0
## explicit; go 1.16
# github.com/go-toolsmith/typep v1.1.0
## explicit; go 1.16
# github.com/go-viper/mapstructure/v2 v2.2.1
## explicit; go 1.18
# github.com/go-xmlfmt/xmlfmt v1.1.3
## explicit
# github.com/gobwas/glob v0.2.3
## explicit
# github.com/gofrs/flock v0.12.1
## explicit; go 1.21.0
# github.com/golang/protobuf v1.5.3
## explicit; go 1.9
# github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32
## explicit; go 1.22.0
# github.com/golangci/go-printf-func-name v0.1.0
## explicit; go 1.22.0
# github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d
## explicit; go 1.22.0
# github.com/golangci/golangci-lint v1.64.8
## explicit; go 1.23.0
# github.com/golangci/misspell v0.6.0
## explicit; go 1.21
# github.com/golangci/plugin-module-register v0.1.1
## explicit; go 1.21
# github.com/golangci/revgrep v0.8.0
## explicit; go 1.21
# github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed
## explicit; go 1.20
# github.com/google/go-cmp v0.7.0
## explicit; go 1.21
# github.com/google/uuid v1.6.0
## explicit
github.com/google/uuid
# github.com/gordonklaus/ineffassign v0.1.0
## explicit; go 1.14
# github.com/gostaticanalysis/analysisutil v0.7.1
## explicit; go 1.16
# github.com/gostaticanalysis/comment v1.5.0
## explicit; go 1.22.9
# github.com/gostaticanalysis/forcetypeassert v0.2.0
## explicit; go 1.23.0
# github.com/gostaticanalysis/nilerr v0.1.1
## explicit; go 1.15
# github.com/hashicorp/go-immutable-radix/v2 v2.1.0
## explicit; go 1.18
# github.com/hashicorp/go-version v1.7.0
## explicit
# github.com/hashicorp/golang-lru/v2 v2.0.7
## explicit; go 1.18
# github.com/hashicorp/hcl v1.0.0
## explicit
# github.com/hexops/gotextdiff v1.0.3
## explicit; go 1.16
# github.com/inconshreveable/mousetrap v1.1.0
## explicit; go 1.18
github.com/inconshreveable/mousetrap
||||
@@ -68,23 +220,115 @@ github.com/jackc/pgx/v5/pgconn/ctxwatch
github.com/jackc/pgx/v5/pgconn/internal/bgreader
github.com/jackc/pgx/v5/pgproto3
github.com/jackc/pgx/v5/pgtype
# github.com/jgautheron/goconst v1.7.1
## explicit; go 1.13
# github.com/jingyugao/rowserrcheck v1.1.1
## explicit; go 1.13
# github.com/jinzhu/inflection v1.0.0
## explicit
github.com/jinzhu/inflection
# github.com/jjti/go-spancheck v0.6.4
## explicit; go 1.22.1
# github.com/julz/importas v0.2.0
## explicit; go 1.20
# github.com/karamaru-alpha/copyloopvar v1.2.1
## explicit; go 1.21
# github.com/kisielk/errcheck v1.9.0
## explicit; go 1.22.0
# github.com/kkHAIKE/contextcheck v1.1.6
## explicit; go 1.23.0
# github.com/kr/pretty v0.3.1
## explicit; go 1.12
# github.com/kulti/thelper v0.6.3
## explicit; go 1.18
# github.com/kunwardeep/paralleltest v1.0.10
## explicit; go 1.17
# github.com/lasiar/canonicalheader v1.1.2
## explicit; go 1.22.0
# github.com/ldez/exptostd v0.4.2
## explicit; go 1.22.0
# github.com/ldez/gomoddirectives v0.6.1
## explicit; go 1.22.0
# github.com/ldez/grignotin v0.9.0
## explicit; go 1.22.0
# github.com/ldez/tagliatelle v0.7.1
## explicit; go 1.22.0
# github.com/ldez/usetesting v0.4.2
## explicit; go 1.22.0
# github.com/leonklingele/grouper v1.1.2
## explicit; go 1.18
# github.com/lucasb-eyer/go-colorful v1.2.0
## explicit; go 1.12
github.com/lucasb-eyer/go-colorful
# github.com/macabu/inamedparam v0.1.3
## explicit; go 1.20
# github.com/magiconair/properties v1.8.6
## explicit; go 1.13
# github.com/maratori/testableexamples v1.0.0
## explicit; go 1.19
# github.com/maratori/testpackage v1.1.1
## explicit; go 1.20
# github.com/matoous/godox v1.1.0
## explicit; go 1.18
# github.com/mattn/go-colorable v0.1.14
## explicit; go 1.18
# github.com/mattn/go-isatty v0.0.20
## explicit; go 1.15
# github.com/mattn/go-runewidth v0.0.16
## explicit; go 1.9
github.com/mattn/go-runewidth
# github.com/matttproud/golang_protobuf_extensions v1.0.1
## explicit
# github.com/mgechev/revive v1.7.0
## explicit; go 1.22.1
# github.com/mitchellh/go-homedir v1.1.0
## explicit
# github.com/mitchellh/mapstructure v1.5.0
## explicit; go 1.14
# github.com/moricho/tparallel v0.3.2
## explicit; go 1.20
# github.com/nakabonne/nestif v0.3.1
## explicit; go 1.15
# github.com/nishanths/exhaustive v0.12.0
## explicit; go 1.18
# github.com/nishanths/predeclared v0.2.2
## explicit; go 1.14
# github.com/nunnatsa/ginkgolinter v0.19.1
## explicit; go 1.23.0
# github.com/olekukonko/tablewriter v0.0.5
## explicit; go 1.12
# github.com/pelletier/go-toml v1.9.5
## explicit; go 1.12
# github.com/pelletier/go-toml/v2 v2.2.3
## explicit; go 1.21.0
# github.com/pmezard/go-difflib v1.0.0
## explicit
github.com/pmezard/go-difflib/difflib
# github.com/polyfloyd/go-errorlint v1.7.1
## explicit; go 1.22.0
# github.com/prometheus/client_golang v1.12.1
## explicit; go 1.13
# github.com/prometheus/client_model v0.2.0
## explicit; go 1.9
# github.com/prometheus/common v0.32.1
## explicit; go 1.13
# github.com/prometheus/procfs v0.7.3
## explicit; go 1.13
# github.com/puzpuzpuz/xsync/v3 v3.5.1
## explicit; go 1.18
github.com/puzpuzpuz/xsync/v3
# github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1
## explicit; go 1.19
# github.com/quasilyte/go-ruleguard/dsl v0.3.22
## explicit; go 1.15
# github.com/quasilyte/gogrep v0.5.0
## explicit; go 1.16
# github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727
## explicit; go 1.14
# github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567
## explicit; go 1.17
# github.com/raeperd/recvcheck v0.2.0
## explicit; go 1.22.0
# github.com/rivo/tview v0.42.0
## explicit; go 1.18
github.com/rivo/tview
||||
@@ -93,20 +337,76 @@ github.com/rivo/tview
github.com/rivo/uniseg
# github.com/rogpeppe/go-internal v1.14.1
## explicit; go 1.23
# github.com/ryancurrah/gomodguard v1.3.5
## explicit; go 1.22.0
# github.com/ryanrolds/sqlclosecheck v0.5.1
## explicit; go 1.20
# github.com/sanposhiho/wastedassign/v2 v2.1.0
## explicit; go 1.18
# github.com/santhosh-tekuri/jsonschema/v6 v6.0.1
## explicit; go 1.21
# github.com/sashamelentyev/interfacebloat v1.1.0
## explicit; go 1.18
# github.com/sashamelentyev/usestdlibvars v1.28.0
## explicit; go 1.20
# github.com/securego/gosec/v2 v2.22.2
## explicit; go 1.23.0
# github.com/sirupsen/logrus v1.9.3
## explicit; go 1.13
# github.com/sivchari/containedctx v1.0.3
## explicit; go 1.17
# github.com/sivchari/tenv v1.12.1
## explicit; go 1.22.0
# github.com/sonatard/noctx v0.1.0
## explicit; go 1.22.0
# github.com/sourcegraph/go-diff v0.7.0
## explicit; go 1.14
# github.com/spf13/afero v1.12.0
## explicit; go 1.21
# github.com/spf13/cast v1.5.0
## explicit; go 1.18
# github.com/spf13/cobra v1.10.2
## explicit; go 1.15
github.com/spf13/cobra
# github.com/spf13/jwalterweatherman v1.1.0
## explicit
# github.com/spf13/pflag v1.0.10
## explicit; go 1.12
github.com/spf13/pflag
# github.com/spf13/viper v1.12.0
## explicit; go 1.17
# github.com/ssgreg/nlreturn/v2 v2.2.1
## explicit; go 1.13
# github.com/stbenjam/no-sprintf-host-port v0.2.0
## explicit; go 1.18
# github.com/stretchr/objx v0.5.2
## explicit; go 1.20
# github.com/stretchr/testify v1.11.1
## explicit; go 1.17
github.com/stretchr/testify/assert
github.com/stretchr/testify/assert/yaml
github.com/stretchr/testify/require
# github.com/subosito/gotenv v1.4.1
## explicit; go 1.18
# github.com/tdakkota/asciicheck v0.4.1
## explicit; go 1.22.0
# github.com/tetafro/godot v1.5.0
## explicit; go 1.20
# github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3
## explicit; go 1.12
# github.com/timonwong/loggercheck v0.10.1
## explicit; go 1.22.0
# github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc
## explicit
github.com/tmthrgd/go-hex
# github.com/tomarrell/wrapcheck/v2 v2.10.0
## explicit; go 1.21
# github.com/tommy-muehle/go-mnd/v2 v2.5.1
## explicit; go 1.12
# github.com/ultraware/funlen v0.2.0
## explicit; go 1.22.0
# github.com/ultraware/whitespace v0.2.0
## explicit; go 1.20
# github.com/uptrace/bun v1.2.16
## explicit; go 1.24.0
github.com/uptrace/bun
@@ -118,6 +418,10 @@ github.com/uptrace/bun/internal
github.com/uptrace/bun/internal/parser
github.com/uptrace/bun/internal/tagparser
github.com/uptrace/bun/schema
# github.com/uudashr/gocognit v1.2.0
## explicit; go 1.19
# github.com/uudashr/iface v1.3.1
## explicit; go 1.22.1
# github.com/vmihailenco/msgpack/v5 v5.4.1
## explicit; go 1.19
github.com/vmihailenco/msgpack/v5
|
||||
@@ -127,9 +431,37 @@ github.com/vmihailenco/msgpack/v5/msgpcode
github.com/vmihailenco/tagparser/v2
github.com/vmihailenco/tagparser/v2/internal
github.com/vmihailenco/tagparser/v2/internal/parser
# github.com/xen0n/gosmopolitan v1.2.2
## explicit; go 1.19
# github.com/yagipy/maintidx v1.0.0
## explicit; go 1.17
# github.com/yeya24/promlinter v0.3.0
## explicit; go 1.20
# github.com/ykadowak/zerologlint v0.1.5
## explicit; go 1.19
# gitlab.com/bosi/decorder v0.4.2
## explicit; go 1.20
# go-simpler.org/musttag v0.13.0
## explicit; go 1.20
# go-simpler.org/sloglint v0.9.0
## explicit; go 1.22.0
# go.uber.org/atomic v1.7.0
## explicit; go 1.13
# go.uber.org/automaxprocs v1.6.0
## explicit; go 1.20
# go.uber.org/multierr v1.6.0
## explicit; go 1.12
# go.uber.org/zap v1.24.0
## explicit; go 1.19
# golang.org/x/crypto v0.41.0
## explicit; go 1.23.0
golang.org/x/crypto/pbkdf2
# golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac
## explicit; go 1.18
# golang.org/x/mod v0.26.0
## explicit; go 1.23.0
# golang.org/x/sync v0.16.0
## explicit; go 1.23.0
# golang.org/x/sys v0.38.0
## explicit; go 1.24.0
golang.org/x/sys/cpu
@@ -156,6 +488,20 @@ golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/width
# golang.org/x/tools v0.35.0
## explicit; go 1.23.0
# google.golang.org/protobuf v1.36.5
## explicit; go 1.21
# gopkg.in/ini.v1 v1.67.0
## explicit
# gopkg.in/yaml.v2 v2.4.0
## explicit; go 1.15
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
# honnef.co/go/tools v0.6.1
## explicit; go 1.23
# mvdan.cc/gofumpt v0.7.0
## explicit; go 1.22
# mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f
## explicit; go 1.21