diff --git a/README.md b/README.md
index 591a693..570298e 100644
--- a/README.md
+++ b/README.md
@@ -118,6 +118,55 @@ go install -v git.warky.dev/wdevs/relspecgo/cmd/relspec@latest
 
 ## Usage
 
+### Interactive Schema Editor
+
+```bash
+# Launch interactive editor with a DBML schema
+relspec edit --from dbml --from-path schema.dbml --to dbml --to-path schema.dbml
+
+# Edit PostgreSQL database in place
+relspec edit --from pgsql --from-conn "postgres://user:pass@localhost/mydb" \
+  --to pgsql --to-conn "postgres://user:pass@localhost/mydb"
+
+# Edit JSON schema and save as GORM models
+relspec edit --from json --from-path db.json --to gorm --to-path models/
+```
+
+The `edit` command launches an interactive terminal user interface where you can:
+- Browse and navigate your database structure
+- Create, modify, and delete schemas, tables, and columns
+- Configure column properties, constraints, and relationships
+- Save changes to various formats
+- Import and merge schemas from other databases
+
+### Schema Merging
+
+```bash
+# Merge two JSON schemas (additive merge - adds missing items only)
+relspec merge --target json --target-path base.json \
+  --source json --source-path additions.json \
+  --output json --output-path merged.json
+
+# Merge PostgreSQL database into JSON, skipping specific tables
+relspec merge --target json --target-path current.json \
+  --source pgsql --source-conn "postgres://user:pass@localhost/source_db" \
+  --output json --output-path updated.json \
+  --skip-tables "audit_log,temp_tables"
+
+# Cross-format merge (DBML + YAML → JSON)
+relspec merge --target dbml --target-path base.dbml \
+  --source yaml --source-path additions.yaml \
+  --output json --output-path result.json \
+  --skip-relations --skip-views
+```
+
+The `merge` command combines two database schemas additively:
+- Adds missing schemas, tables, columns, and other objects
+- Never modifies or deletes existing items, making it a safe operation
+- Supports selective merging with skip options (domains, relations, enums, views, sequences, and specific tables; table-name matching is case-insensitive)
+- Works across any combination of supported formats
+- Useful for integrating multiple schema definitions or applying patches; see the sketch below for programmatic use
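+
+The merge logic also ships as an importable Go package, `pkg/merge`, so the same
+additive semantics can be embedded in your own tooling. A minimal sketch — the
+`target` and `source` databases here are hypothetical and would be loaded via the
+readers in `pkg/readers`:
+
+```go
+import (
+	"fmt"
+
+	"git.warky.dev/wdevs/relspecgo/pkg/merge"
+	"git.warky.dev/wdevs/relspecgo/pkg/models"
+)
+
+// mergeInto merges source into target in place and prints a summary.
+// target and source are *models.Database values loaded elsewhere.
+func mergeInto(target, source *models.Database) {
+	opts := &merge.MergeOptions{SkipViews: true} // same semantics as --skip-views
+	result := merge.MergeDatabases(target, source, opts)
+	fmt.Println(merge.GetMergeSummary(result))
+}
+```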
"git.warky.dev/wdevs/relspecgo/pkg/writers/drizzle" + wgorm "git.warky.dev/wdevs/relspecgo/pkg/writers/gorm" + wgraphql "git.warky.dev/wdevs/relspecgo/pkg/writers/graphql" + wjson "git.warky.dev/wdevs/relspecgo/pkg/writers/json" + wpgsql "git.warky.dev/wdevs/relspecgo/pkg/writers/pgsql" + wprisma "git.warky.dev/wdevs/relspecgo/pkg/writers/prisma" + wtypeorm "git.warky.dev/wdevs/relspecgo/pkg/writers/typeorm" + wyaml "git.warky.dev/wdevs/relspecgo/pkg/writers/yaml" +) + +var ( + mergeTargetType string + mergeTargetPath string + mergeTargetConn string + mergeSourceType string + mergeSourcePath string + mergeSourceConn string + mergeOutputType string + mergeOutputPath string + mergeOutputConn string + mergeSkipDomains bool + mergeSkipRelations bool + mergeSkipEnums bool + mergeSkipViews bool + mergeSkipSequences bool + mergeSkipTables string // Comma-separated table names to skip + mergeVerbose bool +) + +var mergeCmd = &cobra.Command{ + Use: "merge", + Short: "Merge database schemas (additive only - adds missing items)", + Long: `Merge one database schema into another. Performs additive merging only: +adds missing schemas, tables, columns, and other objects without modifying +or deleting existing items. + +The target database is loaded first, then the source database is merged into it. +The result can be saved to a new format or updated in place. + +Examples: + # Merge two JSON schemas + relspec merge --target json --target-path base.json \ + --source json --source-path additional.json \ + --output json --output-path merged.json + + # Merge from PostgreSQL into JSON + relspec merge --target json --target-path mydb.json \ + --source pgsql --source-conn "postgres://user:pass@localhost/source_db" \ + --output json --output-path combined.json + + # Merge DBML and YAML, skip relations + relspec merge --target dbml --target-path schema.dbml \ + --source yaml --source-path tables.yaml \ + --output dbml --output-path merged.dbml \ + --skip-relations + + # Merge and save back to target format + relspec merge --target json --target-path base.json \ + --source json --source-path patch.json \ + --output json --output-path base.json`, + RunE: runMerge, +} + +func init() { + // Target database flags + mergeCmd.Flags().StringVar(&mergeTargetType, "target", "", "Target format (required): dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql") + mergeCmd.Flags().StringVar(&mergeTargetPath, "target-path", "", "Target file path (required for file-based formats)") + mergeCmd.Flags().StringVar(&mergeTargetConn, "target-conn", "", "Target connection string (required for pgsql)") + + // Source database flags + mergeCmd.Flags().StringVar(&mergeSourceType, "source", "", "Source format (required): dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql") + mergeCmd.Flags().StringVar(&mergeSourcePath, "source-path", "", "Source file path (required for file-based formats)") + mergeCmd.Flags().StringVar(&mergeSourceConn, "source-conn", "", "Source connection string (required for pgsql)") + + // Output flags + mergeCmd.Flags().StringVar(&mergeOutputType, "output", "", "Output format (required): dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql") + mergeCmd.Flags().StringVar(&mergeOutputPath, "output-path", "", "Output file path (required for file-based formats)") + mergeCmd.Flags().StringVar(&mergeOutputConn, "output-conn", "", "Output connection string (for pgsql)") + + // Merge options + 
+	mergeCmd.Flags().BoolVar(&mergeSkipDomains, "skip-domains", false, "Skip domains during merge")
+	mergeCmd.Flags().BoolVar(&mergeSkipRelations, "skip-relations", false, "Skip relations during merge")
+	mergeCmd.Flags().BoolVar(&mergeSkipEnums, "skip-enums", false, "Skip enums during merge")
+	mergeCmd.Flags().BoolVar(&mergeSkipViews, "skip-views", false, "Skip views during merge")
+	mergeCmd.Flags().BoolVar(&mergeSkipSequences, "skip-sequences", false, "Skip sequences during merge")
+	mergeCmd.Flags().StringVar(&mergeSkipTables, "skip-tables", "", "Comma-separated list of table names to skip during merge (case-insensitive)")
+	mergeCmd.Flags().BoolVar(&mergeVerbose, "verbose", false, "Show verbose output")
+}
+
+func runMerge(cmd *cobra.Command, args []string) error {
+	fmt.Fprintf(os.Stderr, "\n=== RelSpec Merge ===\n")
+	fmt.Fprintf(os.Stderr, "Started at: %s\n\n", getCurrentTimestamp())
+
+	// Validate required flags
+	if mergeTargetType == "" {
+		return fmt.Errorf("--target format is required")
+	}
+	if mergeSourceType == "" {
+		return fmt.Errorf("--source format is required")
+	}
+	if mergeOutputType == "" {
+		return fmt.Errorf("--output format is required")
+	}
+
+	// Validate and expand file paths
+	if mergeTargetType != "pgsql" {
+		if mergeTargetPath == "" {
+			return fmt.Errorf("--target-path is required for %s format", mergeTargetType)
+		}
+		mergeTargetPath = expandPath(mergeTargetPath)
+	} else if mergeTargetConn == "" {
+		return fmt.Errorf("--target-conn is required for pgsql format")
+	}
+
+	if mergeSourceType != "pgsql" {
+		if mergeSourcePath == "" {
+			return fmt.Errorf("--source-path is required for %s format", mergeSourceType)
+		}
+		mergeSourcePath = expandPath(mergeSourcePath)
+	} else if mergeSourceConn == "" {
+		return fmt.Errorf("--source-conn is required for pgsql format")
+	}
+
+	if mergeOutputType != "pgsql" {
+		if mergeOutputPath == "" {
+			return fmt.Errorf("--output-path is required for %s format", mergeOutputType)
+		}
+		mergeOutputPath = expandPath(mergeOutputPath)
+	}
+
+	// Step 1: Read target database
+	fmt.Fprintf(os.Stderr, "[1/4] Reading target database...\n")
+	fmt.Fprintf(os.Stderr, "  Format: %s\n", mergeTargetType)
+	if mergeTargetPath != "" {
+		fmt.Fprintf(os.Stderr, "  Path: %s\n", mergeTargetPath)
+	}
+	if mergeTargetConn != "" {
+		fmt.Fprintf(os.Stderr, "  Conn: %s\n", maskPassword(mergeTargetConn))
+	}
+
+	targetDB, err := readDatabaseForMerge(mergeTargetType, mergeTargetPath, mergeTargetConn, "Target")
+	if err != nil {
+		return fmt.Errorf("failed to read target database: %w", err)
+	}
+	fmt.Fprintf(os.Stderr, "  ✓ Successfully read target database '%s'\n", targetDB.Name)
+	printDatabaseStats(targetDB)
+
+	// Step 2: Read source database
+	fmt.Fprintf(os.Stderr, "\n[2/4] Reading source database...\n")
+	fmt.Fprintf(os.Stderr, "  Format: %s\n", mergeSourceType)
+	if mergeSourcePath != "" {
+		fmt.Fprintf(os.Stderr, "  Path: %s\n", mergeSourcePath)
+	}
+	if mergeSourceConn != "" {
+		fmt.Fprintf(os.Stderr, "  Conn: %s\n", maskPassword(mergeSourceConn))
+	}
+
+	sourceDB, err := readDatabaseForMerge(mergeSourceType, mergeSourcePath, mergeSourceConn, "Source")
+	if err != nil {
+		return fmt.Errorf("failed to read source database: %w", err)
+	}
+	fmt.Fprintf(os.Stderr, "  ✓ Successfully read source database '%s'\n", sourceDB.Name)
+	printDatabaseStats(sourceDB)
+
+	// Step 3: Merge databases
+	fmt.Fprintf(os.Stderr, "\n[3/4] Merging databases...\n")
+
+	opts := &merge.MergeOptions{
+		SkipDomains:   mergeSkipDomains,
+		SkipRelations: mergeSkipRelations,
+		SkipEnums:     mergeSkipEnums,
+		SkipViews:     mergeSkipViews,
+		SkipSequences: mergeSkipSequences,
+	}
+
+	// Parse skip-tables flag
+	if mergeSkipTables != "" {
+		opts.SkipTableNames = parseSkipTables(mergeSkipTables)
+		if len(opts.SkipTableNames) > 0 {
+			fmt.Fprintf(os.Stderr, "  Skipping tables: %s\n", mergeSkipTables)
+		}
+	}
+
+	result := merge.MergeDatabases(targetDB, sourceDB, opts)
+
+	// Update timestamp
+	targetDB.UpdateDate()
+
+	// Print merge summary
+	fmt.Fprintf(os.Stderr, "  ✓ Merge complete\n\n")
+	fmt.Fprintf(os.Stderr, "%s\n", merge.GetMergeSummary(result))
+
+	// Step 4: Write output
+	fmt.Fprintf(os.Stderr, "\n[4/4] Writing output...\n")
+	fmt.Fprintf(os.Stderr, "  Format: %s\n", mergeOutputType)
+	if mergeOutputPath != "" {
+		fmt.Fprintf(os.Stderr, "  Path: %s\n", mergeOutputPath)
+	}
+
+	err = writeDatabaseForMerge(mergeOutputType, mergeOutputPath, mergeOutputConn, targetDB, "Output")
+	if err != nil {
+		return fmt.Errorf("failed to write output: %w", err)
+	}
+
+	fmt.Fprintf(os.Stderr, "  ✓ Successfully wrote merged database\n")
+	fmt.Fprintf(os.Stderr, "\n=== Merge complete ===\n")
+
+	return nil
+}
+
+func readDatabaseForMerge(dbType, filePath, connString, label string) (*models.Database, error) {
+	var reader readers.Reader
+
+	switch strings.ToLower(dbType) {
+	case "dbml":
+		if filePath == "" {
+			return nil, fmt.Errorf("%s: file path is required for DBML format", label)
+		}
+		reader = dbml.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "dctx":
+		if filePath == "" {
+			return nil, fmt.Errorf("%s: file path is required for DCTX format", label)
+		}
+		reader = dctx.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "drawdb":
+		if filePath == "" {
+			return nil, fmt.Errorf("%s: file path is required for DrawDB format", label)
+		}
+		reader = drawdb.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "graphql":
+		if filePath == "" {
+			return nil, fmt.Errorf("%s: file path is required for GraphQL format", label)
+		}
+		reader = graphql.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "json":
+		if filePath == "" {
+			return nil, fmt.Errorf("%s: file path is required for JSON format", label)
+		}
+		reader = json.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "yaml":
+		if filePath == "" {
+			return nil, fmt.Errorf("%s: file path is required for YAML format", label)
+		}
+		reader = yaml.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "gorm":
+		if filePath == "" {
+			return nil, fmt.Errorf("%s: file path is required for GORM format", label)
+		}
+		reader = gorm.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "bun":
+		if filePath == "" {
+			return nil, fmt.Errorf("%s: file path is required for Bun format", label)
+		}
+		reader = bun.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "drizzle":
+		if filePath == "" {
+			return nil, fmt.Errorf("%s: file path is required for Drizzle format", label)
+		}
+		reader = drizzle.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "prisma":
+		if filePath == "" {
+			return nil, fmt.Errorf("%s: file path is required for Prisma format", label)
+		}
+		reader = prisma.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "typeorm":
+		if filePath == "" {
+			return nil, fmt.Errorf("%s: file path is required for TypeORM format", label)
+		}
+		reader = typeorm.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "pgsql":
+		if connString == "" {
+			return nil, fmt.Errorf("%s: connection string is required for PostgreSQL format", label)
+		}
+		reader = pgsql.NewReader(&readers.ReaderOptions{ConnectionString: connString})
+	default:
+		return nil, fmt.Errorf("%s: unsupported format '%s'", label, dbType)
+	}
+
+	db, err := reader.ReadDatabase()
+	if err != nil {
+		return nil, err
+	}
+
+	return db, nil
+}
+
+// writeDatabaseForMerge writes db in the given format. connString is accepted
+// for parity with readDatabaseForMerge; the pgsql writer below currently takes
+// only an output path.
+func writeDatabaseForMerge(dbType, filePath, connString string, db *models.Database, label string) error {
+	var writer writers.Writer
+
+	switch strings.ToLower(dbType) {
+	case "dbml":
+		if filePath == "" {
+			return fmt.Errorf("%s: file path is required for DBML format", label)
+		}
+		writer = wdbml.NewWriter(&writers.WriterOptions{OutputPath: filePath})
+	case "dctx":
+		if filePath == "" {
+			return fmt.Errorf("%s: file path is required for DCTX format", label)
+		}
+		writer = wdctx.NewWriter(&writers.WriterOptions{OutputPath: filePath})
+	case "drawdb":
+		if filePath == "" {
+			return fmt.Errorf("%s: file path is required for DrawDB format", label)
+		}
+		writer = wdrawdb.NewWriter(&writers.WriterOptions{OutputPath: filePath})
+	case "graphql":
+		if filePath == "" {
+			return fmt.Errorf("%s: file path is required for GraphQL format", label)
+		}
+		writer = wgraphql.NewWriter(&writers.WriterOptions{OutputPath: filePath})
+	case "json":
+		if filePath == "" {
+			return fmt.Errorf("%s: file path is required for JSON format", label)
+		}
+		writer = wjson.NewWriter(&writers.WriterOptions{OutputPath: filePath})
+	case "yaml":
+		if filePath == "" {
+			return fmt.Errorf("%s: file path is required for YAML format", label)
+		}
+		writer = wyaml.NewWriter(&writers.WriterOptions{OutputPath: filePath})
+	case "gorm":
+		if filePath == "" {
+			return fmt.Errorf("%s: file path is required for GORM format", label)
+		}
+		writer = wgorm.NewWriter(&writers.WriterOptions{OutputPath: filePath})
+	case "bun":
+		if filePath == "" {
+			return fmt.Errorf("%s: file path is required for Bun format", label)
+		}
+		writer = wbun.NewWriter(&writers.WriterOptions{OutputPath: filePath})
+	case "drizzle":
+		if filePath == "" {
+			return fmt.Errorf("%s: file path is required for Drizzle format", label)
+		}
+		writer = wdrizzle.NewWriter(&writers.WriterOptions{OutputPath: filePath})
+	case "prisma":
+		if filePath == "" {
+			return fmt.Errorf("%s: file path is required for Prisma format", label)
+		}
+		writer = wprisma.NewWriter(&writers.WriterOptions{OutputPath: filePath})
+	case "typeorm":
+		if filePath == "" {
+			return fmt.Errorf("%s: file path is required for TypeORM format", label)
+		}
+		writer = wtypeorm.NewWriter(&writers.WriterOptions{OutputPath: filePath})
+	case "pgsql":
+		writer = wpgsql.NewWriter(&writers.WriterOptions{OutputPath: filePath})
+	default:
+		return fmt.Errorf("%s: unsupported format '%s'", label, dbType)
+	}
+
+	return writer.WriteDatabase(db)
+}
+
+func expandPath(path string) string {
+	if len(path) > 0 && path[0] == '~' {
+		home, err := os.UserHomeDir()
+		if err == nil {
+			return filepath.Join(home, path[1:])
+		}
+	}
+	return path
+}
+
+func printDatabaseStats(db *models.Database) {
+	totalTables := 0
+	totalColumns := 0
+	totalConstraints := 0
+	totalIndexes := 0
+
+	for _, schema := range db.Schemas {
+		totalTables += len(schema.Tables)
+		for _, table := range schema.Tables {
+			totalColumns += len(table.Columns)
+			totalConstraints += len(table.Constraints)
+			totalIndexes += len(table.Indexes)
+		}
+	}
+
+	fmt.Fprintf(os.Stderr, "  Schemas: %d, Tables: %d, Columns: %d, Constraints: %d, Indexes: %d\n",
+		len(db.Schemas), totalTables, totalColumns, totalConstraints, totalIndexes)
+}
+
+func parseSkipTables(skipTablesStr string) map[string]bool {
+	skipTables := make(map[string]bool)
+	if skipTablesStr == "" {
+		return skipTables
+	}
+
+	// Split by comma and trim whitespace
+	parts := strings.Split(skipTablesStr, ",")
+	for _, part := range parts {
+		trimmed := strings.TrimSpace(part)
+		if trimmed != "" {
+			// Store in lowercase for case-insensitive matching
+			skipTables[strings.ToLower(trimmed)] = true
+		}
+	}
+
+	return skipTables
+}
diff --git a/cmd/relspec/root.go b/cmd/relspec/root.go
index cb33ce6..cce8c97 100644
--- a/cmd/relspec/root.go
+++ b/cmd/relspec/root.go
@@ -22,4 +22,5 @@ func init() {
 	rootCmd.AddCommand(scriptsCmd)
 	rootCmd.AddCommand(templCmd)
 	rootCmd.AddCommand(editCmd)
+	rootCmd.AddCommand(mergeCmd)
 }
diff --git a/pkg/merge/merge.go b/pkg/merge/merge.go
new file mode 100644
index 0000000..b29d7d7
--- /dev/null
+++ b/pkg/merge/merge.go
@@ -0,0 +1,574 @@
+// Package merge provides utilities for merging database schemas.
+// It allows combining schemas from multiple sources while avoiding duplicates,
+// supporting only additive operations (no deletion or modification of existing items).
+package merge
+
+import (
+	"fmt"
+	"strings"
+
+	"git.warky.dev/wdevs/relspecgo/pkg/models"
+)
+
+// MergeResult represents the result of a merge operation
+type MergeResult struct {
+	SchemasAdded   int
+	TablesAdded    int
+	ColumnsAdded   int
+	RelationsAdded int
+	DomainsAdded   int
+	EnumsAdded     int
+	ViewsAdded     int
+	SequencesAdded int
+}
+
+// MergeOptions contains options for merge operations
+type MergeOptions struct {
+	SkipDomains    bool
+	SkipRelations  bool
+	SkipEnums      bool
+	SkipViews      bool
+	SkipSequences  bool
+	SkipTableNames map[string]bool // Tables to skip during merge, keyed by lowercase table name for case-insensitive matching
+}
+
+// MergeDatabases merges the source database into the target database.
+// Only adds missing items; existing items are not modified.
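+//
+// A minimal usage sketch (hypothetical example; target and source are
+// *models.Database values loaded via pkg/readers):
+//
+//	opts := &MergeOptions{SkipViews: true}
+//	result := MergeDatabases(target, source, opts)
+//	fmt.Println(GetMergeSummary(result))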
+func MergeDatabases(target, source *models.Database, opts *MergeOptions) *MergeResult {
+	if opts == nil {
+		opts = &MergeOptions{}
+	}
+
+	result := &MergeResult{}
+
+	if target == nil || source == nil {
+		return result
+	}
+
+	// Merge schemas and their contents
+	result.merge(target, source, opts)
+
+	return result
+}
+
+func (r *MergeResult) merge(target, source *models.Database, opts *MergeOptions) {
+	// Create maps of existing schemas for quick lookup
+	existingSchemas := make(map[string]*models.Schema)
+	for _, schema := range target.Schemas {
+		existingSchemas[schema.SQLName()] = schema
+	}
+
+	// Merge schemas
+	for _, srcSchema := range source.Schemas {
+		schemaName := srcSchema.SQLName()
+		if tgtSchema, exists := existingSchemas[schemaName]; exists {
+			// Schema exists, merge its contents
+			r.mergeSchemaContents(tgtSchema, srcSchema, opts)
+		} else {
+			// Schema doesn't exist, add it
+			newSchema := cloneSchema(srcSchema)
+			target.Schemas = append(target.Schemas, newSchema)
+			r.SchemasAdded++
+		}
+	}
+
+	// Merge domains if not skipped
+	if !opts.SkipDomains {
+		r.mergeDomains(target, source)
+	}
+}
+
+func (r *MergeResult) mergeSchemaContents(target, source *models.Schema, opts *MergeOptions) {
+	// Merge tables
+	r.mergeTables(target, source, opts)
+
+	// Merge views if not skipped
+	if !opts.SkipViews {
+		r.mergeViews(target, source)
+	}
+
+	// Merge sequences if not skipped
+	if !opts.SkipSequences {
+		r.mergeSequences(target, source)
+	}
+
+	// Merge enums if not skipped
+	if !opts.SkipEnums {
+		r.mergeEnums(target, source)
+	}
+
+	// Merge relations if not skipped
+	if !opts.SkipRelations {
+		r.mergeRelations(target, source)
+	}
+}
+
+func (r *MergeResult) mergeTables(schema *models.Schema, source *models.Schema, opts *MergeOptions) {
+	// Create map of existing tables
+	existingTables := make(map[string]*models.Table)
+	for _, table := range schema.Tables {
+		existingTables[table.SQLName()] = table
+	}
+
+	// Merge tables
+	for _, srcTable := range source.Tables {
+		tableName := srcTable.SQLName()
+
+		// Skip if table is in the skip list (case-insensitive)
+		if opts != nil && opts.SkipTableNames != nil && opts.SkipTableNames[strings.ToLower(tableName)] {
+			continue
+		}
+
+		if tgtTable, exists := existingTables[tableName]; exists {
+			// Table exists, merge its columns
+			r.mergeColumns(tgtTable, srcTable)
+		} else {
+			// Table doesn't exist, add it
+			newTable := cloneTable(srcTable)
+			schema.Tables = append(schema.Tables, newTable)
+			r.TablesAdded++
+			// Count columns in the newly added table
+			r.ColumnsAdded += len(newTable.Columns)
+		}
+	}
+}
+
+func (r *MergeResult) mergeColumns(table *models.Table, srcTable *models.Table) {
+	// Add any source columns the target table does not have yet;
+	// existing columns are left untouched.
+	for colName, srcCol := range srcTable.Columns {
+		if _, exists := table.Columns[colName]; !exists {
+			table.Columns[colName] = cloneColumn(srcCol)
+			r.ColumnsAdded++
+		}
+	}
+}
+
+func (r *MergeResult) mergeViews(schema *models.Schema, source *models.Schema) {
+	// Create map of existing views
+	existingViews := make(map[string]*models.View)
+	for _, view := range schema.Views {
+		existingViews[view.SQLName()] = view
+	}
+
+	// Merge views
+	for _, srcView := range source.Views {
+		viewName := srcView.SQLName()
+		if _, exists := existingViews[viewName]; !exists {
+			// View doesn't exist, add it
+			newView := cloneView(srcView)
+			schema.Views = append(schema.Views, newView)
+			r.ViewsAdded++
+		}
+	}
+}
+
+func (r *MergeResult) mergeSequences(schema *models.Schema, source *models.Schema) {
+	// Create map of existing sequences
+	existingSequences := make(map[string]*models.Sequence)
+	for _, seq := range schema.Sequences {
+		existingSequences[seq.SQLName()] = seq
+	}
+
+	// Merge sequences
+	for _, srcSeq := range source.Sequences {
+		seqName := srcSeq.SQLName()
+		if _, exists := existingSequences[seqName]; !exists {
+			// Sequence doesn't exist, add it
+			newSeq := cloneSequence(srcSeq)
+			schema.Sequences = append(schema.Sequences, newSeq)
+			r.SequencesAdded++
+		}
+	}
+}
+
+func (r *MergeResult) mergeEnums(schema *models.Schema, source *models.Schema) {
+	// Create map of existing enums
+	existingEnums := make(map[string]*models.Enum)
+	for _, enum := range schema.Enums {
+		existingEnums[enum.SQLName()] = enum
+	}
+
+	// Merge enums
+	for _, srcEnum := range source.Enums {
+		enumName := srcEnum.SQLName()
+		if _, exists := existingEnums[enumName]; !exists {
+			// Enum doesn't exist, add it
+			newEnum := cloneEnum(srcEnum)
+			schema.Enums = append(schema.Enums, newEnum)
+			r.EnumsAdded++
+		}
+	}
+}
+
+func (r *MergeResult) mergeRelations(schema *models.Schema, source *models.Schema) {
+	// Create map of existing relations
+	existingRelations := make(map[string]*models.Relationship)
+	for _, rel := range schema.Relations {
+		existingRelations[rel.SQLName()] = rel
+	}
+
+	// Merge relations
+	for _, srcRel := range source.Relations {
+		if _, exists := existingRelations[srcRel.SQLName()]; !exists {
+			// Relation doesn't exist, add it
+			newRel := cloneRelation(srcRel)
+			schema.Relations = append(schema.Relations, newRel)
+			r.RelationsAdded++
+		}
+	}
+}
+
+func (r *MergeResult) mergeDomains(target *models.Database, source *models.Database) {
+	// Create map of existing domains
+	existingDomains := make(map[string]*models.Domain)
+	for _, domain := range target.Domains {
+		existingDomains[domain.SQLName()] = domain
+	}
+
+	// Merge domains
+	for _, srcDomain := range source.Domains {
+		domainName := srcDomain.SQLName()
+		if _, exists := existingDomains[domainName]; !exists {
+			// Domain doesn't exist, add it
+			newDomain := cloneDomain(srcDomain)
+			target.Domains = append(target.Domains, newDomain)
+			r.DomainsAdded++
+		}
+	}
+}
+
+// Clone functions to create deep copies of models
+
+func cloneSchema(schema *models.Schema) *models.Schema {
+	if schema == nil {
+		return nil
+	}
+	newSchema := &models.Schema{
+		Name:        schema.Name,
+		Description: schema.Description,
+		Owner:       schema.Owner,
+		Comment:     schema.Comment,
+		Sequence:    schema.Sequence,
+		UpdatedAt:   schema.UpdatedAt,
+		Tables:      make([]*models.Table, 0),
+		Views:       make([]*models.View, 0),
+		Sequences:   make([]*models.Sequence, 0),
+		Enums:       make([]*models.Enum, 0),
+		Relations:   make([]*models.Relationship, 0),
+	}
+
+	if schema.Permissions != nil {
+		newSchema.Permissions = make(map[string]string)
+		for k, v := range schema.Permissions {
+			newSchema.Permissions[k] = v
+		}
+	}
+
+	if schema.Metadata != nil {
+		newSchema.Metadata = make(map[string]interface{})
+		for k, v := range schema.Metadata {
+			newSchema.Metadata[k] = v
+		}
+	}
+
+	if schema.Scripts != nil {
+		newSchema.Scripts = make([]*models.Script, len(schema.Scripts))
+		// Note: this copies the *models.Script pointers; the Script values
+		// themselves remain shared with the source schema.
+		copy(newSchema.Scripts, schema.Scripts)
+	}
+
+	// Clone tables
+	for _, table := range schema.Tables {
+		newSchema.Tables = append(newSchema.Tables, cloneTable(table))
+	}
+
+	// Clone views
+	for _, view := range schema.Views {
+		newSchema.Views = append(newSchema.Views, cloneView(view))
+	}
+
+	// Clone sequences
+	for _, seq := range schema.Sequences {
+		newSchema.Sequences = append(newSchema.Sequences, cloneSequence(seq))
+	}
+
+	// Clone enums
+	for _, enum := range schema.Enums {
+		newSchema.Enums = append(newSchema.Enums, cloneEnum(enum))
+	}
+
+	// Clone relations
+	for _, rel := range schema.Relations {
+		newSchema.Relations = append(newSchema.Relations, cloneRelation(rel))
+	}
+
+	return newSchema
+}
+
+func cloneTable(table *models.Table) *models.Table {
+	if table == nil {
+		return nil
+	}
+	newTable := &models.Table{
+		Name:        table.Name,
+		Description: table.Description,
+		Schema:      table.Schema,
+		Comment:     table.Comment,
+		Sequence:    table.Sequence,
+		UpdatedAt:   table.UpdatedAt,
+		Columns:     make(map[string]*models.Column),
+		Constraints: make(map[string]*models.Constraint),
+		Indexes:     make(map[string]*models.Index),
+	}
+
+	if table.Metadata != nil {
+		newTable.Metadata = make(map[string]interface{})
+		for k, v := range table.Metadata {
+			newTable.Metadata[k] = v
+		}
+	}
+
+	// Clone columns
+	for colName, col := range table.Columns {
+		newTable.Columns[colName] = cloneColumn(col)
+	}
+
+	// Clone constraints
+	for constName, constraint := range table.Constraints {
+		newTable.Constraints[constName] = cloneConstraint(constraint)
+	}
+
+	// Clone indexes
+	for idxName, index := range table.Indexes {
+		newTable.Indexes[idxName] = cloneIndex(index)
+	}
+
+	return newTable
+}
+
+func cloneColumn(col *models.Column) *models.Column {
+	if col == nil {
+		return nil
+	}
+	newCol := &models.Column{
+		Name:          col.Name,
+		Type:          col.Type,
+		Description:   col.Description,
+		Comment:       col.Comment,
+		IsPrimaryKey:  col.IsPrimaryKey,
+		NotNull:       col.NotNull,
+		Default:       col.Default,
+		Precision:     col.Precision,
+		Scale:         col.Scale,
+		Length:        col.Length,
+		Sequence:      col.Sequence,
+		AutoIncrement: col.AutoIncrement,
+		Collation:     col.Collation,
+	}
+
+	return newCol
+}
+
+func cloneConstraint(constraint *models.Constraint) *models.Constraint {
+	if constraint == nil {
+		return nil
+	}
+	newConstraint := &models.Constraint{
+		Type:              constraint.Type,
+		Columns:           make([]string, len(constraint.Columns)),
+		ReferencedTable:   constraint.ReferencedTable,
+		ReferencedSchema:  constraint.ReferencedSchema,
+		ReferencedColumns: make([]string, len(constraint.ReferencedColumns)),
+		OnUpdate:          constraint.OnUpdate,
+		OnDelete:          constraint.OnDelete,
+		Expression:        constraint.Expression,
+		Name:              constraint.Name,
+		Deferrable:        constraint.Deferrable,
+		InitiallyDeferred: constraint.InitiallyDeferred,
+		Sequence:          constraint.Sequence,
+	}
+	copy(newConstraint.Columns, constraint.Columns)
+	copy(newConstraint.ReferencedColumns, constraint.ReferencedColumns)
+	return newConstraint
+}
+
+func cloneIndex(index *models.Index) *models.Index {
+	if index == nil {
+		return nil
+	}
+	newIndex := &models.Index{
+		Name:        index.Name,
+		Description: index.Description,
+		Table:       index.Table,
+		Schema:      index.Schema,
+		Columns:     make([]string, len(index.Columns)),
+		Unique:      index.Unique,
+		Type:        index.Type,
+		Where:       index.Where,
+		Concurrent:  index.Concurrent,
+		Include:     make([]string, len(index.Include)),
+		Comment:     index.Comment,
+		Sequence:    index.Sequence,
+	}
+	copy(newIndex.Columns, index.Columns)
+	copy(newIndex.Include, index.Include)
+	return newIndex
+}
+
+func cloneView(view *models.View) *models.View {
+	if view == nil {
+		return nil
+	}
+	newView := &models.View{
+		Name:        view.Name,
+		Description: view.Description,
+		Schema:      view.Schema,
+		Definition:  view.Definition,
+		Comment:     view.Comment,
+		Sequence:    view.Sequence,
+		Columns:     make(map[string]*models.Column),
+	}
+
+	if view.Metadata != nil {
+		newView.Metadata = make(map[string]interface{})
+		for k, v := range view.Metadata {
+			newView.Metadata[k] = v
+		}
+	}
+
+	// Clone columns
+	for colName, col := range view.Columns {
+		newView.Columns[colName] = cloneColumn(col)
+	}
+
+	return newView
+}
+
+func cloneSequence(seq *models.Sequence) *models.Sequence {
+	if seq == nil {
+		return nil
+	}
+	newSeq := &models.Sequence{
+		Name:          seq.Name,
+		Description:   seq.Description,
+		Schema:        seq.Schema,
+		StartValue:    seq.StartValue,
+		MinValue:      seq.MinValue,
+		MaxValue:      seq.MaxValue,
+		IncrementBy:   seq.IncrementBy,
+		CacheSize:     seq.CacheSize,
+		Cycle:         seq.Cycle,
+		OwnedByTable:  seq.OwnedByTable,
+		OwnedByColumn: seq.OwnedByColumn,
+		Comment:       seq.Comment,
+		Sequence:      seq.Sequence,
+	}
+	return newSeq
+}
+
+func cloneEnum(enum *models.Enum) *models.Enum {
+	if enum == nil {
+		return nil
+	}
+	newEnum := &models.Enum{
+		Name:   enum.Name,
+		Values: make([]string, len(enum.Values)),
+		Schema: enum.Schema,
+	}
+	copy(newEnum.Values, enum.Values)
+	return newEnum
+}
+
+func cloneRelation(rel *models.Relationship) *models.Relationship {
+	if rel == nil {
+		return nil
+	}
+	newRel := &models.Relationship{
+		Name:          rel.Name,
+		Type:          rel.Type,
+		FromTable:     rel.FromTable,
+		FromSchema:    rel.FromSchema,
+		FromColumns:   make([]string, len(rel.FromColumns)),
+		ToTable:       rel.ToTable,
+		ToSchema:      rel.ToSchema,
+		ToColumns:     make([]string, len(rel.ToColumns)),
+		ForeignKey:    rel.ForeignKey,
+		ThroughTable:  rel.ThroughTable,
+		ThroughSchema: rel.ThroughSchema,
+		Description:   rel.Description,
+		Sequence:      rel.Sequence,
+	}
+
+	if rel.Properties != nil {
+		newRel.Properties = make(map[string]string)
+		for k, v := range rel.Properties {
+			newRel.Properties[k] = v
+		}
+	}
+
+	copy(newRel.FromColumns, rel.FromColumns)
+	copy(newRel.ToColumns, rel.ToColumns)
+	return newRel
+}
+
+func cloneDomain(domain *models.Domain) *models.Domain {
+	if domain == nil {
+		return nil
+	}
+	newDomain := &models.Domain{
+		Name:        domain.Name,
+		Description: domain.Description,
+		Comment:     domain.Comment,
+		Sequence:    domain.Sequence,
+		Tables:      make([]*models.DomainTable, len(domain.Tables)),
+	}
+
+	if domain.Metadata != nil {
+		newDomain.Metadata = make(map[string]interface{})
+		for k, v := range domain.Metadata {
+			newDomain.Metadata[k] = v
+		}
+	}
+
+	// Note: this copies the *models.DomainTable pointers; the entries remain
+	// shared with the source domain.
+	copy(newDomain.Tables, domain.Tables)
+	return newDomain
+}
+
+// GetMergeSummary returns a human-readable summary of the merge result
+func GetMergeSummary(result *MergeResult) string {
+	if result == nil {
+		return "No merge result available"
+	}
+
+	lines := []string{
+		"=== Merge Summary ===",
+		fmt.Sprintf("Schemas added: %d", result.SchemasAdded),
+		fmt.Sprintf("Tables added: %d", result.TablesAdded),
+		fmt.Sprintf("Columns added: %d", result.ColumnsAdded),
+		fmt.Sprintf("Views added: %d", result.ViewsAdded),
+		fmt.Sprintf("Sequences added: %d", result.SequencesAdded),
+		fmt.Sprintf("Enums added: %d", result.EnumsAdded),
+		fmt.Sprintf("Relations added: %d", result.RelationsAdded),
+		fmt.Sprintf("Domains added: %d", result.DomainsAdded),
+	}
+
+	totalAdded := result.SchemasAdded + result.TablesAdded + result.ColumnsAdded +
+		result.ViewsAdded + result.SequencesAdded + result.EnumsAdded +
+		result.RelationsAdded + result.DomainsAdded
+
+	lines = append(lines, fmt.Sprintf("Total items added: %d", totalAdded))
+
+	summary := strings.Join(lines, "\n") + "\n"
+
+	return summary
+}
diff --git a/pkg/ui/load_save_screens.go b/pkg/ui/load_save_screens.go
index bd6d0f0..762ec6f 100644
--- a/pkg/ui/load_save_screens.go
+++ b/pkg/ui/load_save_screens.go
@@ -4,10 +4,12 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"strings"
 
 	"github.com/gdamore/tcell/v2"
 	"github.com/rivo/tview"
 
+	"git.warky.dev/wdevs/relspecgo/pkg/merge"
 	"git.warky.dev/wdevs/relspecgo/pkg/models"
 	"git.warky.dev/wdevs/relspecgo/pkg/readers"
 	rbun "git.warky.dev/wdevs/relspecgo/pkg/readers/bun"
@@ -522,3 +524,268 @@ Examples:
   - File: ~/schemas/mydb.dbml
   - Directory (for code formats): ./models/`
 }
+
+// showImportScreen displays the import/merge database screen
+func (se *SchemaEditor) showImportScreen() {
+	flex := tview.NewFlex().SetDirection(tview.FlexRow)
+
+	// Title
+	title := tview.NewTextView().
+		SetText("[::b]Import & Merge Database Schema").
+		SetTextAlign(tview.AlignCenter).
+		SetDynamicColors(true)
+
+	// Form
+	form := tview.NewForm()
+	form.SetBorder(true).SetTitle(" Import Configuration ").SetTitleAlign(tview.AlignLeft)
+
+	// Format selection
+	formatOptions := []string{
+		"dbml", "dctx", "drawdb", "graphql", "json", "yaml",
+		"gorm", "bun", "drizzle", "prisma", "typeorm", "pgsql",
+	}
+	currentFormat := formatOptions[0]
+
+	// Form field state captured by the input callbacks below
+	filePath := ""
+	connString := ""
+	skipDomains := false
+	skipRelations := false
+	skipEnums := false
+	skipViews := false
+	skipSequences := false
+	skipTables := ""
+
+	form.AddDropDown("Format", formatOptions, 0, func(option string, index int) {
+		currentFormat = option
+	})
+
+	form.AddInputField("File Path", "", 50, nil, func(value string) {
+		filePath = value
+	})
+
+	form.AddInputField("Connection String", "", 50, nil, func(value string) {
+		connString = value
+	})
+
+	form.AddInputField("Skip Tables (comma-separated)", "", 50, nil, func(value string) {
+		skipTables = value
+	})
+
+	form.AddCheckbox("Skip Domains", false, func(checked bool) {
+		skipDomains = checked
+	})
+
+	form.AddCheckbox("Skip Relations", false, func(checked bool) {
+		skipRelations = checked
+	})
+
+	form.AddCheckbox("Skip Enums", false, func(checked bool) {
+		skipEnums = checked
+	})
+
+	form.AddCheckbox("Skip Views", false, func(checked bool) {
+		skipViews = checked
+	})
+
+	form.AddCheckbox("Skip Sequences", false, func(checked bool) {
+		skipSequences = checked
+	})
+
+	form.AddTextView("Help", getImportHelpText(), 0, 7, true, false)
+
+	// Buttons
+	form.AddButton("Import & Merge [i]", func() {
+		se.importAndMergeDatabase(currentFormat, filePath, connString, skipDomains, skipRelations, skipEnums, skipViews, skipSequences, skipTables)
+	})
+
+	form.AddButton("Back [b]", func() {
+		se.pages.RemovePage("import-database")
+		se.pages.SwitchToPage("main")
+	})
+
+	form.AddButton("Exit [q]", func() {
+		se.app.Stop()
+	})
+
+	// Keyboard shortcuts
+	form.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
+		if event.Key() == tcell.KeyEscape {
+			se.pages.RemovePage("import-database")
+			se.pages.SwitchToPage("main")
+			return nil
+		}
+		switch event.Rune() {
+		case 'i':
+			se.importAndMergeDatabase(currentFormat, filePath, connString, skipDomains, skipRelations, skipEnums, skipViews, skipSequences, skipTables)
+			return nil
+		case 'b':
+			se.pages.RemovePage("import-database")
+			se.pages.SwitchToPage("main")
+			return nil
+		case 'q':
+			se.app.Stop()
+			return nil
+		}
+		return event
+	})
+
+	flex.AddItem(title, 1, 0, false).
+		AddItem(form, 0, 1, true)
+
+	se.pages.AddAndSwitchToPage("import-database", flex, true)
+}
+
+// importAndMergeDatabase imports and merges a database from the specified configuration
+func (se *SchemaEditor) importAndMergeDatabase(format, filePath, connString string, skipDomains, skipRelations, skipEnums, skipViews, skipSequences bool, skipTables string) {
+	// Validate input
+	if format == "pgsql" {
+		if connString == "" {
+			se.showErrorDialog("Error", "Connection string is required for PostgreSQL")
+			return
+		}
+	} else {
+		if filePath == "" {
+			se.showErrorDialog("Error", "File path is required for "+format)
+			return
+		}
+		// Expand home directory
+		if len(filePath) > 0 && filePath[0] == '~' {
+			home, err := os.UserHomeDir()
+			if err == nil {
+				filePath = filepath.Join(home, filePath[1:])
+			}
+		}
+	}
+
+	// Create reader
+	var reader readers.Reader
+	switch format {
+	case "dbml":
+		reader = rdbml.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "dctx":
+		reader = rdctx.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "drawdb":
+		reader = rdrawdb.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "graphql":
+		reader = rgraphql.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "json":
+		reader = rjson.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "yaml":
+		reader = ryaml.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "gorm":
+		reader = rgorm.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "bun":
+		reader = rbun.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "drizzle":
+		reader = rdrizzle.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "prisma":
+		reader = rprisma.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "typeorm":
+		reader = rtypeorm.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "pgsql":
+		reader = rpgsql.NewReader(&readers.ReaderOptions{ConnectionString: connString})
+	default:
+		se.showErrorDialog("Error", "Unsupported format: "+format)
+		return
+	}
+
+	// Read the database to import
+	importDb, err := reader.ReadDatabase()
+	if err != nil {
+		se.showErrorDialog("Import Error", fmt.Sprintf("Failed to read database: %v", err))
+		return
+	}
+
+	// Show confirmation dialog
+	se.showImportConfirmation(importDb, skipDomains, skipRelations, skipEnums, skipViews, skipSequences, skipTables)
+}
+
+// showImportConfirmation shows a confirmation dialog before merging
+func (se *SchemaEditor) showImportConfirmation(importDb *models.Database, skipDomains, skipRelations, skipEnums, skipViews, skipSequences bool, skipTables string) {
+	confirmText := fmt.Sprintf("Import & Merge Database?\n\nSource: %s\nTarget: %s\n\nThis will add missing schemas, tables, columns, and other objects from the source to your database.\n\nExisting items will NOT be modified.",
+		importDb.Name, se.db.Name)
+
+	modal := tview.NewModal().
+		SetText(confirmText).
+		AddButtons([]string{"Cancel", "Merge"}).
+		SetDoneFunc(func(buttonIndex int, buttonLabel string) {
+			se.pages.RemovePage("import-confirm")
+			if buttonLabel == "Merge" {
+				se.performMerge(importDb, skipDomains, skipRelations, skipEnums, skipViews, skipSequences, skipTables)
+			}
+		})
+
+	modal.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
+		if event.Key() == tcell.KeyEscape {
+			se.pages.RemovePage("import-confirm")
+			se.pages.SwitchToPage("import-database")
+			return nil
+		}
+		return event
+	})
+
+	se.pages.AddAndSwitchToPage("import-confirm", modal, true)
+}
+
+// performMerge performs the actual merge operation
+func (se *SchemaEditor) performMerge(importDb *models.Database, skipDomains, skipRelations, skipEnums, skipViews, skipSequences bool, skipTables string) {
+	// Create merge options
+	opts := &merge.MergeOptions{
+		SkipDomains:   skipDomains,
+		SkipRelations: skipRelations,
+		SkipEnums:     skipEnums,
+		SkipViews:     skipViews,
+		SkipSequences: skipSequences,
+	}
+
+	// Parse skip tables
+	if skipTables != "" {
+		opts.SkipTableNames = parseSkipTablesUI(skipTables)
+	}
+
+	// Perform the merge
+	result := merge.MergeDatabases(se.db, importDb, opts)
+
+	// Update the database timestamp
+	se.db.UpdateDate()
+
+	// Show success dialog with summary
+	summary := merge.GetMergeSummary(result)
+	se.showSuccessDialog("Import Complete", summary, func() {
+		se.pages.RemovePage("import-database")
+		se.pages.RemovePage("main")
+		se.pages.AddPage("main", se.createMainMenu(), true, true)
+	})
+}
+
+// getImportHelpText returns the help text for the import screen
+func getImportHelpText() string {
+	return `Import & Merge: Adds missing schemas, tables, columns, and other objects to your existing database.
+
+File-based formats: dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm
+Database formats: pgsql (requires connection string)
+
+Skip options: Check to exclude specific object types from the merge.`
+}
+
+// parseSkipTablesUI mirrors parseSkipTables in cmd/relspec (kept local to this package).
+func parseSkipTablesUI(skipTablesStr string) map[string]bool {
+	skipTables := make(map[string]bool)
+	if skipTablesStr == "" {
+		return skipTables
+	}
+
+	// Split by comma and trim whitespace
+	parts := strings.Split(skipTablesStr, ",")
+	for _, part := range parts {
+		trimmed := strings.TrimSpace(part)
+		if trimmed != "" {
+			// Store in lowercase for case-insensitive matching
+			skipTables[strings.ToLower(trimmed)] = true
+		}
+	}
+
+	return skipTables
+}
diff --git a/pkg/ui/main_menu.go b/pkg/ui/main_menu.go
index 0d2b0a5..864d2e4 100644
--- a/pkg/ui/main_menu.go
+++ b/pkg/ui/main_menu.go
@@ -39,6 +39,9 @@ func (se *SchemaEditor) createMainMenu() tview.Primitive {
 		AddItem("Manage Domains", "View, create, edit, and delete domains", 'd', func() {
 			se.showDomainList()
 		}).
+		AddItem("Import & Merge", "Import and merge schema from another database", 'i', func() {
+			se.showImportScreen()
+		}).
 		AddItem("Save Database", "Save database to file or database", 'w', func() {
 			se.showSaveScreen()
 		}).