Compare commits

...

32 Commits

Author SHA1 Message Date
30ef1db010 chore(release): update package version to 1.0.56
All checks were successful
Release / test (push) Successful in -32m19s
Release / release (push) Successful in -31m39s
Release / pkg-deb (push) Successful in -31m57s
Release / pkg-aur (push) Successful in -31m46s
Release / pkg-rpm (push) Successful in -4m7s
2026-05-05 14:51:10 +02:00
2d97a47ee1 feat: Enhance PostgreSQL type handling and migration scripts
- Introduced equivalent base types and variants for PostgreSQL types to normalize type comparisons.
- Added functions for normalizing SQL types and retrieving equivalent type variants.
- Updated migration writer to handle type alterations with checks for existing types.
- Implemented logic to create necessary extensions (e.g., pg_trgm) based on schema requirements.
- Enhanced tests to cover new functionality for type normalization and migration handling.
- Improved handling of GIN indexes to use appropriate operator classes based on column types.
2026-05-05 14:50:34 +02:00
72200ea72e chore(release): update package version to 1.0.55
All checks were successful
Release / test (push) Successful in -32m1s
Release / release (push) Successful in -31m13s
Release / pkg-aur (push) Successful in -32m13s
Release / pkg-deb (push) Successful in -31m12s
Release / pkg-rpm (push) Successful in -29m45s
2026-05-05 11:36:29 +02:00
608893a3d6 feat(index): implement GIN index support for quoted text columns and enhance index column resolution 2026-05-05 11:32:15 +02:00
53ff745d5d chore(release): update package version to 1.0.54
All checks were successful
Release / test (push) Successful in -31m47s
Release / release (push) Successful in -31m9s
Release / pkg-aur (push) Successful in -31m57s
Release / pkg-deb (push) Successful in -31m1s
Release / pkg-rpm (push) Successful in -29m27s
2026-05-05 11:12:49 +02:00
17bc8ed395 feat(migration): enhance primary key handling and add GIN index support in migration writer 2026-05-05 11:12:23 +02:00
a447b68b22 chore(release): update package version to 1.0.53
All checks were successful
Release / test (push) Successful in -31m55s
Release / release (push) Successful in -31m19s
Release / pkg-aur (push) Successful in -32m3s
Release / pkg-deb (push) Successful in -31m21s
Release / pkg-rpm (push) Successful in -28m4s
2026-05-05 10:48:27 +02:00
4303dcf59b Support typed primary key helpers in gorm and bun writers 2026-05-05 10:32:33 +02:00
e828d48798 chore(release): update package version to 1.0.52
All checks were successful
Release / test (push) Successful in -32m39s
Release / release (push) Successful in -32m1s
Release / pkg-deb (push) Successful in -32m9s
Release / pkg-aur (push) Successful in -31m37s
Release / pkg-rpm (push) Successful in -27m28s
2026-05-03 17:19:22 +02:00
6e470a9239 fix(type_mapper): adjust array tag handling in BuildBunTag 2026-05-03 17:18:58 +02:00
096815fe49 chore(release): update package version to 1.0.51
All checks were successful
Release / test (push) Successful in -32m30s
Release / release (push) Successful in -31m54s
Release / pkg-aur (push) Successful in -32m31s
Release / pkg-deb (push) Successful in -32m7s
Release / pkg-rpm (push) Successful in -30m36s
2026-05-03 16:11:13 +02:00
b8f60203cb fix(type_mapper): handle PostgreSQL array types in tags
* Update BuildBunTag to append "array" for array types
* Add tests for handling array types in TypeMapper
* Adjust regex in SanitizeStructTagValue to preserve array suffix
2026-05-03 16:11:01 +02:00
Hein
15763f60cc Fix GIN opclass handling for array columns 2026-04-30 20:35:06 +02:00
Hein
6d2884f5cf chore(release): update package version to 1.0.50
Some checks failed
Release / test (push) Successful in -32m41s
Release / release (push) Successful in -28m56s
Release / pkg-deb (push) Successful in -31m19s
Release / pkg-aur (push) Successful in -27m21s
Release / pkg-rpm (push) Failing after -26m24s
2026-04-30 20:23:29 +02:00
Hein
f192decff8 Add Prisma 7 flag support 2026-04-30 20:22:57 +02:00
Hein
8b906cf4a3 chore(release): update package version to 1.0.49
All checks were successful
Release / test (push) Successful in -32m39s
Release / release (push) Successful in -31m40s
Release / pkg-aur (push) Successful in -32m46s
Release / pkg-deb (push) Successful in -32m9s
Release / pkg-rpm (push) Successful in -29m53s
2026-04-30 18:16:28 +02:00
Hein
0a3966e6fc fix(pgsql): handle default values for array types in migrations
* update default value quoting logic for PostgreSQL
* add tests for array default value handling
2026-04-30 18:16:21 +02:00
Hein
d30fc24f55 chore(release): update package version to 1.0.48
All checks were successful
Release / pkg-deb (push) Successful in -32m6s
Release / test (push) Successful in -32m44s
Release / release (push) Successful in -32m5s
Release / pkg-aur (push) Successful in -32m38s
Release / pkg-rpm (push) Successful in -30m46s
2026-04-30 16:07:33 +02:00
Hein
16a489d0b8 style(pkg): align json and numeric type mappings 2026-04-30 16:07:16 +02:00
Hein
3524e86282 feat: add --types flag and stdlib nullable type support for bun/gorm writers
* Fix pgsql reader double-quoting defaults: normalizePostgresDefault strips
  surrounding SQL string literal quotes from column_default before storing,
  matching the convention used by every other reader.

* Add NullableTypes field to WriterOptions with NullableTypeResolveSpec
  (default) and NullableTypeStdlib constants.

* Both bun and gorm TypeMappers now accept a typeStyle parameter. stdlib
  mode produces sql.NullString/NullInt32/NullTime etc. for nullable scalars,
  plain Go slices for arrays, and time.Time for NOT NULL timestamps. Default
  resolvespec behaviour is unchanged.

* Add --types flag to convert and split commands.

* Update bun/README.md and gorm/README.md with side-by-side generated code
  examples, updated type mapping tables, and Writer Options documentation.
2026-04-30 16:00:54 +02:00
Hein
1e54fdcd7f Merge branch 'master' of git.warky.dev:wdevs/relspecgo 2026-04-30 15:15:34 +02:00
fb104ea084 feat: PostgreSQL connections opened by relspec set application_name by default to relspecgo/<version>
All checks were successful
Release / test (push) Successful in -31m41s
Release / release (push) Successful in -28m47s
Release / pkg-aur (push) Successful in -32m40s
Release / pkg-deb (push) Successful in -32m25s
Release / pkg-rpm (push) Successful in -28m30s
2026-04-26 17:48:26 +02:00
837160b77a feat(pgsql): implement application_name handling in connection 2026-04-26 17:45:25 +02:00
ed7130bba8 refactor(pkg): canonicalize base types and adjust length handling
* Update base types to keep explicit modifier forms
* Modify length handling for vector types in tests
2026-04-26 17:35:15 +02:00
4ca1810d07 refactor(dctx): sort table columns and indexes for deterministic output
Some checks failed
Release / test (push) Failing after -31m18s
Release / release (push) Has been skipped
Release / pkg-aur (push) Has been skipped
Release / pkg-deb (push) Has been skipped
Release / pkg-rpm (push) Has been skipped
2026-04-26 12:50:39 +02:00
c0880cb076 feat(pkg): preserve PostgreSQL types in mapDataType function
Some checks failed
Release / test (push) Failing after -31m27s
Release / release (push) Has been skipped
Release / pkg-aur (push) Has been skipped
Release / pkg-deb (push) Has been skipped
Release / pkg-rpm (push) Has been skipped
* Add support for known PostgreSQL types and modifiers
* Implement canonicalization for PostgreSQL types
* Introduce unit tests for PostgreSQL type handling
2026-04-26 12:43:44 +02:00
988798998d test(drawdb): add test for converting column types with modifiers
* Implement tests to ensure explicit type modifiers are preserved during conversion.
* Validate behavior for varchar, numeric, and custom vector types.
2026-04-26 12:35:54 +02:00
535a91d4be feat(docs): add comprehensive story of RelSpecGo's development journey 2026-04-08 22:21:24 +02:00
bd54e85727 chore(release): update package version to 1.0.44
All checks were successful
Release / pkg-deb (push) Successful in -29m54s
Release / pkg-rpm (push) Successful in -27m4s
Release / test (push) Successful in -30m26s
Release / release (push) Successful in -29m48s
Release / pkg-aur (push) Successful in -28m28s
2026-04-08 21:34:28 +02:00
b042b2d508 docs: 📝 Update documentation 2026-04-08 21:34:00 +02:00
af1733dc9a feat(pkg): update package description for clarity and consistency 2026-04-08 21:21:33 +02:00
Hein
3d9cc7ec58 .
All checks were successful
Release / Build and Release (push) Successful in -25m33s
2026-02-20 16:32:19 +02:00
72 changed files with 4488 additions and 845 deletions

0
.codex Normal file
View File

323
README.md
View File

@@ -6,264 +6,165 @@
[![Go Version](https://img.shields.io/badge/go-1.24.0-blue.svg)](https://go.dev/dl/)
[![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](LICENSE)
> Database Relations Specification Tool for Go
> Bidirectional database schema conversion, validation, and templating tool.
RelSpec is a comprehensive database relations management tool that reads, transforms, and writes database table specifications across multiple formats and ORMs.
![RelSpec](./assets/image/relspec1_512.jpg)
## Overview
RelSpec provides bidirectional conversion, comparison, and validation of database specification formats, allowing you to:
- Inspect live databases and extract their structure
- Validate schemas against configurable rules and naming conventions
- Convert between different ORM models (GORM, Bun, etc.)
- Transform legacy schema definitions (Clarion DCTX, XML, JSON, etc.)
- Generate standardized specification files (JSON, YAML, etc.)
- Compare database schemas and track changes
![1.00](./assets/image/relspec1_512.jpg)
## Features
### Readers (Input Formats)
RelSpec can read database schemas from multiple sources:
#### ORM Models
- [GORM](pkg/readers/gorm/README.md) - Go GORM model definitions
- [Bun](pkg/readers/bun/README.md) - Go Bun model definitions
- [Drizzle](pkg/readers/drizzle/README.md) - TypeScript Drizzle ORM schemas
- [Prisma](pkg/readers/prisma/README.md) - Prisma schema language
- [TypeORM](pkg/readers/typeorm/README.md) - TypeScript TypeORM entities
#### Database Inspection
- [PostgreSQL](pkg/readers/pgsql/README.md) - Direct PostgreSQL database introspection
- [SQLite](pkg/readers/sqlite/README.md) - Direct SQLite database introspection
#### Schema Formats
- [DBML](pkg/readers/dbml/README.md) - Database Markup Language (dbdiagram.io)
- [DCTX](pkg/readers/dctx/README.md) - Clarion database dictionary format
- [DrawDB](pkg/readers/drawdb/README.md) - DrawDB JSON format
- [GraphQL](pkg/readers/graphql/README.md) - GraphQL Schema Definition Language (SDL)
- [JSON](pkg/readers/json/README.md) - RelSpec canonical JSON format
- [YAML](pkg/readers/yaml/README.md) - RelSpec canonical YAML format
### Writers (Output Formats)
RelSpec can write database schemas to multiple formats:
#### ORM Models
- [GORM](pkg/writers/gorm/README.md) - Generate GORM-compatible Go structs
- [Bun](pkg/writers/bun/README.md) - Generate Bun-compatible Go structs
- [Drizzle](pkg/writers/drizzle/README.md) - Generate Drizzle ORM TypeScript schemas
- [Prisma](pkg/writers/prisma/README.md) - Generate Prisma schema files
- [TypeORM](pkg/writers/typeorm/README.md) - Generate TypeORM TypeScript entities
#### Database DDL
- [PostgreSQL](pkg/writers/pgsql/README.md) - PostgreSQL DDL (CREATE TABLE, etc.)
- [SQLite](pkg/writers/sqlite/README.md) - SQLite DDL with automatic schema flattening
#### Schema Formats
- [DBML](pkg/writers/dbml/README.md) - Database Markup Language
- [DCTX](pkg/writers/dctx/README.md) - Clarion database dictionary format
- [DrawDB](pkg/writers/drawdb/README.md) - DrawDB JSON format
- [GraphQL](pkg/writers/graphql/README.md) - GraphQL Schema Definition Language (SDL)
- [JSON](pkg/writers/json/README.md) - RelSpec canonical JSON format
- [YAML](pkg/writers/yaml/README.md) - RelSpec canonical YAML format
### Inspector (Schema Validation)
RelSpec includes a powerful schema validation and linting tool:
- [Inspector](pkg/inspector/README.md) - Validate database schemas against configurable rules
- Enforce naming conventions (snake_case, camelCase, custom patterns)
- Check primary key and foreign key standards
- Detect missing indexes on foreign keys
- Prevent use of SQL reserved keywords
- Ensure schema integrity (missing PKs, orphaned FKs, circular dependencies)
- Support for custom validation rules
- Multiple output formats (Markdown with colors, JSON)
- CI/CD integration ready
## Use of AI
[Rules and use of AI](./AI_USE.md)
## User Interface
RelSpec provides an interactive terminal-based user interface for managing and editing database schemas. The UI allows you to:
- **Browse Databases** - Navigate through your database structure with an intuitive menu system
- **Edit Schemas** - Create, modify, and organize database schemas
- **Manage Tables** - Add, update, or delete tables with full control over structure
- **Configure Columns** - Define column properties, data types, constraints, and relationships
- **Interactive Editing** - Real-time validation and feedback as you make changes
The interface supports multiple input formats, making it easy to load, edit, and save your database definitions in various formats.
<p align="center" width="100%">
<img src="./assets/image/screenshots/main_screen.jpg">
</p>
<p align="center" width="100%">
<img src="./assets/image/screenshots/table_view.jpg">
</p>
<p align="center" width="100%">
<img src="./assets/image/screenshots/edit_column.jpg">
</p>
## Installation
## Install
```bash
go get github.com/wdevs/relspecgo
go install -v git.warky.dev/wdevs/relspecgo/cmd/relspec@latest
```
## Usage
## Supported Formats
### Interactive Schema Editor
| Direction | Formats |
|-----------|---------|
| **Readers** | `bun` `dbml` `dctx` `drawdb` `drizzle` `gorm` `graphql` `json` `mssql` `pgsql` `prisma` `sqldir` `sqlite` `typeorm` `yaml` |
| **Writers** | `bun` `dbml` `dctx` `drawdb` `drizzle` `gorm` `graphql` `json` `mssql` `pgsql` `prisma` `sqlexec` `sqlite` `template` `typeorm` `yaml` |
## Commands
### `convert` — Schema conversion
```bash
# Launch interactive editor with a DBML schema
relspec edit --from dbml --from-path schema.dbml --to dbml --to-path schema.dbml
# PostgreSQL → GORM models
relspec convert --from pgsql --from-conn "postgres://user:pass@localhost/mydb" \
--to gorm --to-path models/ --package models
# Edit PostgreSQL database in place
relspec edit --from pgsql --from-conn "postgres://user:pass@localhost/mydb" \
--to pgsql --to-conn "postgres://user:pass@localhost/mydb"
# DBML → PostgreSQL DDL
relspec convert --from dbml --from-path schema.dbml --to pgsql --to-path schema.sql
# Edit JSON schema and save as GORM models
relspec edit --from json --from-path db.json --to gorm --to-path models/
# PostgreSQL → SQLite (auto flattens schemas)
relspec convert --from pgsql --from-conn "postgres://..." --to sqlite --to-path schema.sql
# Multiple input files merged
relspec convert --from json --from-list "a.json,b.json" --to yaml --to-path merged.yaml
```
The `edit` command launches an interactive terminal user interface where you can:
- Browse and navigate your database structure
- Create, modify, and delete schemas, tables, and columns
- Configure column properties, constraints, and relationships
- Save changes to various formats
- Import and merge schemas from other databases
PostgreSQL connections opened by relspec set `application_name` by default to
`relspecgo/<version>` (with component suffixes internally, e.g. readers/writers).
If you need a custom value, provide `application_name` explicitly in the connection
string query parameters.
### Schema Merging
### `merge` — Additive schema merge (never modifies existing items)
```bash
# Merge two JSON schemas (additive merge - adds missing items only)
# Merge two JSON schemas
relspec merge --target json --target-path base.json \
--source json --source-path additions.json \
--output json --output-path merged.json
# Merge PostgreSQL database into JSON, skipping specific tables
# Merge PostgreSQL into JSON, skipping tables
relspec merge --target json --target-path current.json \
--source pgsql --source-conn "postgres://user:pass@localhost/source_db" \
--source pgsql --source-conn "postgres://user:pass@localhost/db" \
--output json --output-path updated.json \
--skip-tables "audit_log,temp_tables"
# Cross-format merge (DBML + YAML → JSON)
relspec merge --target dbml --target-path base.dbml \
--source yaml --source-path additions.yaml \
--output json --output-path result.json \
--skip-relations --skip-views
```
The `merge` command combines two database schemas additively:
- Adds missing schemas, tables, columns, and other objects
- Never modifies or deletes existing items (safe operation)
- Supports selective merging with skip options (domains, relations, enums, views, sequences, specific tables)
- Works across any combination of supported formats
- Perfect for integrating multiple schema definitions or applying patches
Skip flags: `--skip-relations` `--skip-views` `--skip-domains` `--skip-enums` `--skip-sequences`
### Schema Conversion
### `inspect` — Schema validation / linting
```bash
# Convert PostgreSQL database to GORM models
relspec convert --from pgsql --from-conn "postgres://user:pass@localhost/mydb" \
--to gorm --to-path models/ --package models
# Convert GORM models to Bun
relspec convert --from gorm --from-path models.go \
--to bun --to-path bun_models.go --package models
# Export database schema to JSON
relspec convert --from pgsql --from-conn "postgres://..." \
--to json --to-path schema.json
# Convert DBML to PostgreSQL SQL
relspec convert --from dbml --from-path schema.dbml \
--to pgsql --to-path schema.sql
# Convert PostgreSQL database to SQLite (with automatic schema flattening)
relspec convert --from pgsql --from-conn "postgres://..." \
--to sqlite --to-path sqlite_schema.sql
```
### Schema Validation
```bash
# Validate a PostgreSQL database with default rules
# Validate PostgreSQL database
relspec inspect --from pgsql --from-conn "postgres://user:pass@localhost/mydb"
# Validate DBML file with custom rules
# Validate DBML with custom rules
relspec inspect --from dbml --from-path schema.dbml --rules .relspec-rules.yaml
# Generate JSON validation report
relspec inspect --from json --from-path db.json \
--output-format json --output report.json
# JSON report output
relspec inspect --from json --from-path db.json --output-format json --output report.json
# Validate specific schema only
# Filter to specific schema
relspec inspect --from pgsql --from-conn "..." --schema public
```
### Schema Comparison
Rules: naming conventions, PK/FK standards, missing indexes, reserved keywords, circular dependencies.
### `diff` — Schema comparison
```bash
# Compare two database schemas
relspec diff --from pgsql --from-conn "postgres://localhost/db1" \
--to pgsql --to-conn "postgres://localhost/db2"
```
### `templ` — Custom template rendering
```bash
# Render database schema to Markdown docs
relspec templ --from pgsql --from-conn "postgres://user:pass@localhost/db" \
--template docs.tmpl --output schema-docs.md
# One TypeScript file per table
relspec templ --from dbml --from-path schema.dbml \
--template ts-model.tmpl --mode table \
--output ./models/ --filename-pattern "{{.Name | toCamelCase}}.ts"
```
Modes: `database` (default) · `schema` · `table` · `script`
Template functions: string utils (`toCamelCase`, `toSnakeCase`, `pluralize`, …), type converters (`sqlToGo`, `sqlToTypeScript`, …), filters, loop helpers, safe access.
### `edit` — Interactive TUI editor
```bash
# Edit DBML schema interactively
relspec edit --from dbml --from-path schema.dbml --to dbml --to-path schema.dbml
# Edit live PostgreSQL database
relspec edit --from pgsql --from-conn "postgres://user:pass@localhost/mydb" \
--to pgsql --to-conn "postgres://user:pass@localhost/mydb"
```
<p align="center">
<img src="./assets/image/screenshots/main_screen.jpg">
</p>
<p align="center">
<img src="./assets/image/screenshots/table_view.jpg">
</p>
<p align="center">
<img src="./assets/image/screenshots/edit_column.jpg">
</p>
## Development
**Prerequisites:** Go 1.24.0+
```bash
make build # → build/relspec
make test # race detection + coverage
make lint # requires golangci-lint
make coverage # → coverage.html
make install # → $GOPATH/bin
```
## Project Structure
```
relspecgo/
├── cmd/
│ └── relspec/ # CLI application (convert, inspect, diff, scripts)
├── pkg/
│ ├── readers/ # Input format readers (DBML, GORM, PostgreSQL, etc.)
│ ├── writers/ # Output format writers (GORM, Bun, SQL, etc.)
│ ├── inspector/ # Schema validation and linting
│ ├── diff/ # Schema comparison
│ ├── models/ # Internal data models
│ ├── transform/ # Transformation logic
│ └── pgsql/ # PostgreSQL utilities (keywords, data types)
├── examples/ # Usage examples
└── tests/ # Test files
cmd/relspec/ CLI commands
pkg/readers/ Input format readers
pkg/writers/ Output format writers
pkg/inspector/ Schema validation
pkg/diff/ Schema comparison
pkg/merge/ Schema merging
pkg/models/ Internal data models
pkg/transform/ Transformation logic
pkg/pgsql/ PostgreSQL utilities
```
## Todo
[Todo List of Features](./TODO.md)
## Development
### Prerequisites
- Go 1.21 or higher
- Access to test databases (optional)
### Building
```bash
go build -o relspec ./cmd/relspec
```
### Testing
```bash
go test ./...
```
## License
Apache License 2.0 - See [LICENSE](LICENSE) for details.
Copyright 2025 Warky Devs
## Contributing
Contributions welcome. Please open an issue or submit a pull request.
1. Register or sign in with GitHub at [git.warky.dev](https://git.warky.dev)
2. Clone the repository: `git clone https://git.warky.dev/wdevs/relspecgo.git`
3. Create a feature branch: `git checkout -b feature/your-feature-name`
4. Commit your changes and push the branch
5. Open a pull request with a description of the new feature or fix
For questions or discussion, join the Discord: [discord.gg/74rcTujp25](https://discord.gg/74rcTujp25) — `warkyhein`
## Links
- [Todo](./TODO.md)
- [AI Use Policy](./AI_USE.md)
- [License](LICENSE) — Apache 2.0 · Copyright 2025 Warky Devs

219
Story.md Normal file
View File

@@ -0,0 +1,219 @@
# From Scripts to RelSpec: What Years of Database Pain Taught Me
It started as a need.
A problem I've carried with me since my early PHP days.
Every project meant doing the same work again. Same patterns, same fixes—just in a different codebase.
It became frustrating fast.
I wanted something solid. Not another workaround.
## The Early Tools Phase
Like most things in development, it began small.
A simple PHP script.
Then a few Python scripts.
Just tools—nothing fancy. The goal was straightforward: generate code faster and remove repetitive work. I even experimented with Clarion templates at one point, trying to bend existing systems into something useful.
Then came SQL scripts.
Then PostgreSQL migration stored procedures.
Then small Go programs using templates.
Each step was solving a problem I had at the time. Nothing unified. Nothing polished. Just survival tools.
---
## Argitek: The First Real Attempt
Eventually, those scattered ideas turned into something more structured: Argitek.
Argitek powered a few real systems, including Powerbid. On paper, it sounded solid:
> “Argitek Next is a powerful code generation tool designed to streamline your development workflow.”
And technically, it worked.
It could generate code from predefined templates, adapt to different scenarios, and reduce repetitive work. But something was off.
It never felt *complete*.
Not something I could confidently release.
So I did what many developers do with almost-good-enough tools—I parked it.
---
## The Breaking Point: Database Migrations
Over the years, one problem kept coming back:
Database migrations.
Not the clean, theoretical kind. The real ones.
* PostgreSQL to ORM mismatches
* DBML to SQL hacks
* GORM inconsistencies
* Manual fixes after “automated” migrations failed
It was always messy. Always unpredictable. Always more work than expected.
By 2025, after a particularly tough year, I had accumulated enough of these problems to stop ignoring them.
---
## December 2025: RelSpecGo Begins
In December 2025, I bootstrapped something new:
**RelSpecGo**
It started simple:
* Initial LICENSE
* Basic configuration
* A direction
By late December:
* SQL writer implemented
* Diff command added
January 2026:
* Documentation
February 2026:
* Schema editor UI (focused on relationships)
* MSSQL DDL writer
* Template support with `--from-list`
---
## April 2026: A Real Tool Emerges
By April 2026, it became something I could finally stand behind.
RelSpecGo reached version **1.0.44**, with:
* Packaging for AUR, Debian, and RPM
* Updated documentation and README
* A full toolchain for:
* Convert
* Merge
* Inspect
* Diff
* Template
* Edit
Support includes:
* bun
* dbml
* drizzle
* gorm
* prisma
* mssql
* pgsql
* sqlite
Plus:
* TUI editor
* Template engine
* Bidirectional schema handling
👉 RelSpecGo: [https://git.warky.dev/wdevs/relspecgo](https://git.warky.dev/wdevs/relspecgo)
This wasn't just another generator anymore.
It became a system for managing *database truth*.
---
## Lessons Learned (The Hard Way)
This journey wasn't about tools. It was about understanding databases properly.
Here are the principles that stuck:
### 1. Data Loss Is Not Acceptable
Changing table structures should **never** result in lost data. If it does, the process is broken.
### 2. Minimal Beats Clever
The simpler the system, the easier it is to trust—and to fix.
### 3. Respect the Database
If you fight database rules, you will lose. Stay aligned with them.
### 4. Indexes and Keys Matter More Than You Think
Performance and correctness both depend on them. Ignore them at your own risk.
### 5. Version-Control Your Backend Logic
SQL scripts, functions, migrations—these must live in version control. No exceptions.
### 6. It's Not Migration—It's Adaptation
You're not just moving data. You're fixing inconsistencies and aligning systems.
### 7. Migrations Never Go as Planned
Always assume something will break. Plan for it.
### 8. One Source of Truth Is Non-Negotiable
Your database schema must have a single, authoritative definition.
### 9. ORM Mapping Is a First-Class Concern
Your application models must reflect the database correctly. Drift causes bugs.
### 10. Audit Trails Are Critical
If you can't track changes, you can't trust your system.
### 11. Manage Database Functions Properly
They are part of your system—not an afterthought.
### 12. If It's Hard to Understand, It's Too Complex
Clarity is a feature. Complexity is technical debt.
### 13. GUIDs Have Their Place
Especially when moving data across systems. They solve real problems.
### 14. But Simplicity Still Wins
Numbered primary keys are predictable, efficient, and easy to reason about.
### 15. JSON Is Power—Use It Carefully
It adds flexibility, but too much turns structure into chaos.
---
## Closing Thoughts
Looking back, this wasn't about building a tool.
It was about:
* Reducing friction
* Making systems predictable
* Respecting the database as the core of the system
RelSpecGo is just the current result of that journey.
Not the end.
Just the first version that feels *right*.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 171 KiB

After

Width:  |  Height:  |  Size: 200 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 107 KiB

After

Width:  |  Height:  |  Size: 200 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 80 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 192 KiB

View File

@@ -52,6 +52,7 @@ var (
convertPackageName string
convertSchemaFilter string
convertFlattenSchema bool
convertNullableTypes string
)
var convertCmd = &cobra.Command{
@@ -175,6 +176,7 @@ func init() {
convertCmd.Flags().StringVar(&convertPackageName, "package", "", "Package name (for code generation formats like gorm/bun)")
convertCmd.Flags().StringVar(&convertSchemaFilter, "schema", "", "Filter to a specific schema by name (required for formats like dctx that only support single schemas)")
convertCmd.Flags().BoolVar(&convertFlattenSchema, "flatten-schema", false, "Flatten schema.table names to schema_table (useful for databases like SQLite that do not support schemas)")
convertCmd.Flags().StringVar(&convertNullableTypes, "types", "", "Nullable type package for code-gen writers (bun/gorm): 'resolvespec' (default) or 'stdlib' (database/sql)")
err := convertCmd.MarkFlagRequired("from")
if err != nil {
@@ -241,7 +243,7 @@ func runConvert(cmd *cobra.Command, args []string) error {
fmt.Fprintf(os.Stderr, " Schema: %s\n", convertSchemaFilter)
}
if err := writeDatabase(db, convertTargetType, convertTargetPath, convertPackageName, convertSchemaFilter, convertFlattenSchema); err != nil {
if err := writeDatabase(db, convertTargetType, convertTargetPath, convertPackageName, convertSchemaFilter, convertFlattenSchema, convertNullableTypes); err != nil {
return fmt.Errorf("failed to write target: %w", err)
}
@@ -284,79 +286,79 @@ func readDatabaseForConvert(dbType, filePath, connString string) (*models.Databa
if filePath == "" {
return nil, fmt.Errorf("file path is required for DBML format")
}
reader = dbml.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = dbml.NewReader(newReaderOptions(filePath, ""))
case "dctx":
if filePath == "" {
return nil, fmt.Errorf("file path is required for DCTX format")
}
reader = dctx.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = dctx.NewReader(newReaderOptions(filePath, ""))
case "drawdb":
if filePath == "" {
return nil, fmt.Errorf("file path is required for DrawDB format")
}
reader = drawdb.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = drawdb.NewReader(newReaderOptions(filePath, ""))
case "json":
if filePath == "" {
return nil, fmt.Errorf("file path is required for JSON format")
}
reader = json.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = json.NewReader(newReaderOptions(filePath, ""))
case "yaml", "yml":
if filePath == "" {
return nil, fmt.Errorf("file path is required for YAML format")
}
reader = yaml.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = yaml.NewReader(newReaderOptions(filePath, ""))
case "pgsql", "postgres", "postgresql":
if connString == "" {
return nil, fmt.Errorf("connection string is required for PostgreSQL format")
}
reader = pgsql.NewReader(&readers.ReaderOptions{ConnectionString: connString})
reader = pgsql.NewReader(newReaderOptions("", connString))
case "gorm":
if filePath == "" {
return nil, fmt.Errorf("file path is required for GORM format")
}
reader = gorm.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = gorm.NewReader(newReaderOptions(filePath, ""))
case "bun":
if filePath == "" {
return nil, fmt.Errorf("file path is required for Bun format")
}
reader = bun.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = bun.NewReader(newReaderOptions(filePath, ""))
case "drizzle":
if filePath == "" {
return nil, fmt.Errorf("file path is required for Drizzle format")
}
reader = drizzle.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = drizzle.NewReader(newReaderOptions(filePath, ""))
case "prisma":
if filePath == "" {
return nil, fmt.Errorf("file path is required for Prisma format")
}
reader = prisma.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = prisma.NewReader(newReaderOptions(filePath, ""))
case "typeorm":
if filePath == "" {
return nil, fmt.Errorf("file path is required for TypeORM format")
}
reader = typeorm.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = typeorm.NewReader(newReaderOptions(filePath, ""))
case "graphql", "gql":
if filePath == "" {
return nil, fmt.Errorf("file path is required for GraphQL format")
}
reader = graphql.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = graphql.NewReader(newReaderOptions(filePath, ""))
case "mssql", "sqlserver", "mssql2016", "mssql2017", "mssql2019", "mssql2022":
if connString == "" {
return nil, fmt.Errorf("connection string is required for MSSQL format")
}
reader = mssql.NewReader(&readers.ReaderOptions{ConnectionString: connString})
reader = mssql.NewReader(newReaderOptions("", connString))
case "sqlite", "sqlite3":
// SQLite can use either file path or connection string
@@ -367,7 +369,7 @@ func readDatabaseForConvert(dbType, filePath, connString string) (*models.Databa
if dbPath == "" {
return nil, fmt.Errorf("file path or connection string is required for SQLite format")
}
reader = sqlite.NewReader(&readers.ReaderOptions{FilePath: dbPath})
reader = sqlite.NewReader(newReaderOptions(dbPath, ""))
default:
return nil, fmt.Errorf("unsupported source format: %s", dbType)
@@ -381,14 +383,10 @@ func readDatabaseForConvert(dbType, filePath, connString string) (*models.Databa
return db, nil
}
func writeDatabase(db *models.Database, dbType, outputPath, packageName, schemaFilter string, flattenSchema bool) error {
func writeDatabase(db *models.Database, dbType, outputPath, packageName, schemaFilter string, flattenSchema bool, nullableTypes string) error {
var writer writers.Writer
writerOpts := &writers.WriterOptions{
OutputPath: outputPath,
PackageName: packageName,
FlattenSchema: flattenSchema,
}
writerOpts := newWriterOptions(outputPath, packageName, flattenSchema, nullableTypes)
switch strings.ToLower(dbType) {
case "dbml":

View File

@@ -240,62 +240,62 @@ func readDatabaseForEdit(dbType, filePath, connString, label string) (*models.Da
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for DBML format", label)
}
reader = dbml.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = dbml.NewReader(newReaderOptions(filePath, ""))
case "dctx":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for DCTX format", label)
}
reader = dctx.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = dctx.NewReader(newReaderOptions(filePath, ""))
case "drawdb":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for DrawDB format", label)
}
reader = drawdb.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = drawdb.NewReader(newReaderOptions(filePath, ""))
case "graphql":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for GraphQL format", label)
}
reader = graphql.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = graphql.NewReader(newReaderOptions(filePath, ""))
case "json":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for JSON format", label)
}
reader = json.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = json.NewReader(newReaderOptions(filePath, ""))
case "yaml":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for YAML format", label)
}
reader = yaml.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = yaml.NewReader(newReaderOptions(filePath, ""))
case "gorm":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for GORM format", label)
}
reader = gorm.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = gorm.NewReader(newReaderOptions(filePath, ""))
case "bun":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for Bun format", label)
}
reader = bun.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = bun.NewReader(newReaderOptions(filePath, ""))
case "drizzle":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for Drizzle format", label)
}
reader = drizzle.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = drizzle.NewReader(newReaderOptions(filePath, ""))
case "prisma":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for Prisma format", label)
}
reader = prisma.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = prisma.NewReader(newReaderOptions(filePath, ""))
case "typeorm":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for TypeORM format", label)
}
reader = typeorm.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = typeorm.NewReader(newReaderOptions(filePath, ""))
case "pgsql":
if connString == "" {
return nil, fmt.Errorf("%s: connection string is required for PostgreSQL format", label)
}
reader = pgsql.NewReader(&readers.ReaderOptions{ConnectionString: connString})
reader = pgsql.NewReader(newReaderOptions("", connString))
case "sqlite", "sqlite3":
// SQLite can use either file path or connection string
dbPath := filePath
@@ -305,7 +305,7 @@ func readDatabaseForEdit(dbType, filePath, connString, label string) (*models.Da
if dbPath == "" {
return nil, fmt.Errorf("%s: file path or connection string is required for SQLite format", label)
}
reader = sqlite.NewReader(&readers.ReaderOptions{FilePath: dbPath})
reader = sqlite.NewReader(newReaderOptions(dbPath, ""))
default:
return nil, fmt.Errorf("%s: unsupported format: %s", label, dbType)
}
@@ -323,31 +323,31 @@ func writeDatabaseForEdit(dbType, filePath, connString string, db *models.Databa
switch strings.ToLower(dbType) {
case "dbml":
writer = wdbml.NewWriter(&writers.WriterOptions{OutputPath: filePath})
writer = wdbml.NewWriter(newWriterOptions(filePath, "", false, ""))
case "dctx":
writer = wdctx.NewWriter(&writers.WriterOptions{OutputPath: filePath})
writer = wdctx.NewWriter(newWriterOptions(filePath, "", false, ""))
case "drawdb":
writer = wdrawdb.NewWriter(&writers.WriterOptions{OutputPath: filePath})
writer = wdrawdb.NewWriter(newWriterOptions(filePath, "", false, ""))
case "graphql":
writer = wgraphql.NewWriter(&writers.WriterOptions{OutputPath: filePath})
writer = wgraphql.NewWriter(newWriterOptions(filePath, "", false, ""))
case "json":
writer = wjson.NewWriter(&writers.WriterOptions{OutputPath: filePath})
writer = wjson.NewWriter(newWriterOptions(filePath, "", false, ""))
case "yaml":
writer = wyaml.NewWriter(&writers.WriterOptions{OutputPath: filePath})
writer = wyaml.NewWriter(newWriterOptions(filePath, "", false, ""))
case "gorm":
writer = wgorm.NewWriter(&writers.WriterOptions{OutputPath: filePath})
writer = wgorm.NewWriter(newWriterOptions(filePath, "", false, ""))
case "bun":
writer = wbun.NewWriter(&writers.WriterOptions{OutputPath: filePath})
writer = wbun.NewWriter(newWriterOptions(filePath, "", false, ""))
case "drizzle":
writer = wdrizzle.NewWriter(&writers.WriterOptions{OutputPath: filePath})
writer = wdrizzle.NewWriter(newWriterOptions(filePath, "", false, ""))
case "prisma":
writer = wprisma.NewWriter(&writers.WriterOptions{OutputPath: filePath})
writer = wprisma.NewWriter(newWriterOptions(filePath, "", false, ""))
case "typeorm":
writer = wtypeorm.NewWriter(&writers.WriterOptions{OutputPath: filePath})
writer = wtypeorm.NewWriter(newWriterOptions(filePath, "", false, ""))
case "sqlite", "sqlite3":
writer = wsqlite.NewWriter(&writers.WriterOptions{OutputPath: filePath})
writer = wsqlite.NewWriter(newWriterOptions(filePath, "", false, ""))
case "pgsql":
writer = wpgsql.NewWriter(&writers.WriterOptions{OutputPath: filePath})
writer = wpgsql.NewWriter(newWriterOptions(filePath, "", false, ""))
default:
return fmt.Errorf("%s: unsupported format: %s", label, dbType)
}

View File

@@ -221,73 +221,73 @@ func readDatabaseForInspect(dbType, filePath, connString string) (*models.Databa
if filePath == "" {
return nil, fmt.Errorf("file path is required for DBML format")
}
reader = dbml.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = dbml.NewReader(newReaderOptions(filePath, ""))
case "dctx":
if filePath == "" {
return nil, fmt.Errorf("file path is required for DCTX format")
}
reader = dctx.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = dctx.NewReader(newReaderOptions(filePath, ""))
case "drawdb":
if filePath == "" {
return nil, fmt.Errorf("file path is required for DrawDB format")
}
reader = drawdb.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = drawdb.NewReader(newReaderOptions(filePath, ""))
case "graphql":
if filePath == "" {
return nil, fmt.Errorf("file path is required for GraphQL format")
}
reader = graphql.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = graphql.NewReader(newReaderOptions(filePath, ""))
case "json":
if filePath == "" {
return nil, fmt.Errorf("file path is required for JSON format")
}
reader = json.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = json.NewReader(newReaderOptions(filePath, ""))
case "yaml", "yml":
if filePath == "" {
return nil, fmt.Errorf("file path is required for YAML format")
}
reader = yaml.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = yaml.NewReader(newReaderOptions(filePath, ""))
case "gorm":
if filePath == "" {
return nil, fmt.Errorf("file path is required for GORM format")
}
reader = gorm.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = gorm.NewReader(newReaderOptions(filePath, ""))
case "bun":
if filePath == "" {
return nil, fmt.Errorf("file path is required for Bun format")
}
reader = bun.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = bun.NewReader(newReaderOptions(filePath, ""))
case "drizzle":
if filePath == "" {
return nil, fmt.Errorf("file path is required for Drizzle format")
}
reader = drizzle.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = drizzle.NewReader(newReaderOptions(filePath, ""))
case "prisma":
if filePath == "" {
return nil, fmt.Errorf("file path is required for Prisma format")
}
reader = prisma.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = prisma.NewReader(newReaderOptions(filePath, ""))
case "typeorm":
if filePath == "" {
return nil, fmt.Errorf("file path is required for TypeORM format")
}
reader = typeorm.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = typeorm.NewReader(newReaderOptions(filePath, ""))
case "pgsql", "postgres", "postgresql":
if connString == "" {
return nil, fmt.Errorf("connection string is required for PostgreSQL format")
}
reader = pgsql.NewReader(&readers.ReaderOptions{ConnectionString: connString})
reader = pgsql.NewReader(newReaderOptions("", connString))
case "sqlite", "sqlite3":
// SQLite can use either file path or connection string
@@ -298,7 +298,7 @@ func readDatabaseForInspect(dbType, filePath, connString string) (*models.Databa
if dbPath == "" {
return nil, fmt.Errorf("file path or connection string is required for SQLite format")
}
reader = sqlite.NewReader(&readers.ReaderOptions{FilePath: dbPath})
reader = sqlite.NewReader(newReaderOptions(dbPath, ""))
default:
return nil, fmt.Errorf("unsupported database type: %s", dbType)

View File

@@ -258,6 +258,11 @@ func runMerge(cmd *cobra.Command, args []string) error {
fmt.Fprintf(os.Stderr, " ✓ Merge complete\n\n")
fmt.Fprintf(os.Stderr, "%s\n", merge.GetMergeSummary(result))
if strings.EqualFold(mergeOutputType, "pgsql") && len(result.TypeConflicts) > 0 {
return fmt.Errorf("merge detected conflicting existing column types and cannot safely continue with pgsql output\n%s",
merge.GetColumnTypeConflictSummary(result, 10))
}
// Step 4: Write output
fmt.Fprintf(os.Stderr, "\n[4/4] Writing output...\n")
fmt.Fprintf(os.Stderr, " Format: %s\n", mergeOutputType)
@@ -284,62 +289,62 @@ func readDatabaseForMerge(dbType, filePath, connString, label string) (*models.D
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for DBML format", label)
}
reader = dbml.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = dbml.NewReader(newReaderOptions(filePath, ""))
case "dctx":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for DCTX format", label)
}
reader = dctx.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = dctx.NewReader(newReaderOptions(filePath, ""))
case "drawdb":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for DrawDB format", label)
}
reader = drawdb.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = drawdb.NewReader(newReaderOptions(filePath, ""))
case "graphql":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for GraphQL format", label)
}
reader = graphql.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = graphql.NewReader(newReaderOptions(filePath, ""))
case "json":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for JSON format", label)
}
reader = json.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = json.NewReader(newReaderOptions(filePath, ""))
case "yaml":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for YAML format", label)
}
reader = yaml.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = yaml.NewReader(newReaderOptions(filePath, ""))
case "gorm":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for GORM format", label)
}
reader = gorm.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = gorm.NewReader(newReaderOptions(filePath, ""))
case "bun":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for Bun format", label)
}
reader = bun.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = bun.NewReader(newReaderOptions(filePath, ""))
case "drizzle":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for Drizzle format", label)
}
reader = drizzle.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = drizzle.NewReader(newReaderOptions(filePath, ""))
case "prisma":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for Prisma format", label)
}
reader = prisma.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = prisma.NewReader(newReaderOptions(filePath, ""))
case "typeorm":
if filePath == "" {
return nil, fmt.Errorf("%s: file path is required for TypeORM format", label)
}
reader = typeorm.NewReader(&readers.ReaderOptions{FilePath: filePath})
reader = typeorm.NewReader(newReaderOptions(filePath, ""))
case "pgsql":
if connString == "" {
return nil, fmt.Errorf("%s: connection string is required for PostgreSQL format", label)
}
reader = pgsql.NewReader(&readers.ReaderOptions{ConnectionString: connString})
reader = pgsql.NewReader(newReaderOptions("", connString))
case "sqlite", "sqlite3":
// SQLite can use either file path or connection string
dbPath := filePath
@@ -349,7 +354,7 @@ func readDatabaseForMerge(dbType, filePath, connString, label string) (*models.D
if dbPath == "" {
return nil, fmt.Errorf("%s: file path or connection string is required for SQLite format", label)
}
reader = sqlite.NewReader(&readers.ReaderOptions{FilePath: dbPath})
reader = sqlite.NewReader(newReaderOptions(dbPath, ""))
default:
return nil, fmt.Errorf("%s: unsupported format '%s'", label, dbType)
}
@@ -370,61 +375,61 @@ func writeDatabaseForMerge(dbType, filePath, connString string, db *models.Datab
if filePath == "" {
return fmt.Errorf("%s: file path is required for DBML format", label)
}
writer = wdbml.NewWriter(&writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema})
writer = wdbml.NewWriter(newWriterOptions(filePath, "", flattenSchema, ""))
case "dctx":
if filePath == "" {
return fmt.Errorf("%s: file path is required for DCTX format", label)
}
writer = wdctx.NewWriter(&writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema})
writer = wdctx.NewWriter(newWriterOptions(filePath, "", flattenSchema, ""))
case "drawdb":
if filePath == "" {
return fmt.Errorf("%s: file path is required for DrawDB format", label)
}
writer = wdrawdb.NewWriter(&writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema})
writer = wdrawdb.NewWriter(newWriterOptions(filePath, "", flattenSchema, ""))
case "graphql":
if filePath == "" {
return fmt.Errorf("%s: file path is required for GraphQL format", label)
}
writer = wgraphql.NewWriter(&writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema})
writer = wgraphql.NewWriter(newWriterOptions(filePath, "", flattenSchema, ""))
case "json":
if filePath == "" {
return fmt.Errorf("%s: file path is required for JSON format", label)
}
writer = wjson.NewWriter(&writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema})
writer = wjson.NewWriter(newWriterOptions(filePath, "", flattenSchema, ""))
case "yaml":
if filePath == "" {
return fmt.Errorf("%s: file path is required for YAML format", label)
}
writer = wyaml.NewWriter(&writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema})
writer = wyaml.NewWriter(newWriterOptions(filePath, "", flattenSchema, ""))
case "gorm":
if filePath == "" {
return fmt.Errorf("%s: file path is required for GORM format", label)
}
writer = wgorm.NewWriter(&writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema})
writer = wgorm.NewWriter(newWriterOptions(filePath, "", flattenSchema, ""))
case "bun":
if filePath == "" {
return fmt.Errorf("%s: file path is required for Bun format", label)
}
writer = wbun.NewWriter(&writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema})
writer = wbun.NewWriter(newWriterOptions(filePath, "", flattenSchema, ""))
case "drizzle":
if filePath == "" {
return fmt.Errorf("%s: file path is required for Drizzle format", label)
}
writer = wdrizzle.NewWriter(&writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema})
writer = wdrizzle.NewWriter(newWriterOptions(filePath, "", flattenSchema, ""))
case "prisma":
if filePath == "" {
return fmt.Errorf("%s: file path is required for Prisma format", label)
}
writer = wprisma.NewWriter(&writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema})
writer = wprisma.NewWriter(newWriterOptions(filePath, "", flattenSchema, ""))
case "typeorm":
if filePath == "" {
return fmt.Errorf("%s: file path is required for TypeORM format", label)
}
writer = wtypeorm.NewWriter(&writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema})
writer = wtypeorm.NewWriter(newWriterOptions(filePath, "", flattenSchema, ""))
case "sqlite", "sqlite3":
writer = wsqlite.NewWriter(&writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema})
writer = wsqlite.NewWriter(newWriterOptions(filePath, "", flattenSchema, ""))
case "pgsql":
writerOpts := &writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema}
writerOpts := newWriterOptions(filePath, "", flattenSchema, "")
if connString != "" {
writerOpts.Metadata = map[string]interface{}{
"connection_string": connString,

View File

@@ -3,6 +3,7 @@ package main
import (
"os"
"path/filepath"
"strings"
"testing"
)
@@ -160,3 +161,38 @@ func TestRunMerge_FromListMissingSourceType(t *testing.T) {
t.Error("expected error when neither --source-path nor --from-list is provided")
}
}
// TestRunMerge_PgsqlOutputRejectsColumnTypeConflict runs the merge command
// against two JSON fixtures whose shared users.id column differs in type
// (integer vs uuid) and asserts that pgsql output aborts with an error that
// carries the conflict summary and names the offending column.
func TestRunMerge_PgsqlOutputRejectsColumnTypeConflict(t *testing.T) {
	// Merge flags live in package-level vars; snapshot and restore them so
	// this test does not leak state into sibling tests.
	saved := saveMergeState()
	defer restoreMergeState(saved)
	dir := t.TempDir()
	targetFile := filepath.Join(dir, "target.json")
	sourceFile := filepath.Join(dir, "source.json")
	// Identical schemas apart from the id column type — the minimal conflict.
	writeTestJSONWithSingleColumnType(t, targetFile, "users", "integer")
	writeTestJSONWithSingleColumnType(t, sourceFile, "users", "uuid")
	mergeTargetType = "json"
	mergeTargetPath = targetFile
	mergeTargetConn = ""
	mergeSourceType = "json"
	mergeSourcePath = sourceFile
	mergeSourceConn = ""
	mergeFromList = nil
	// pgsql output rejects type conflicts (see runMerge's conflict check).
	mergeOutputType = "pgsql"
	mergeOutputPath = ""
	mergeOutputConn = "postgres://relspec:secret@localhost/testdb"
	mergeSkipTables = ""
	mergeReportPath = ""
	err := runMerge(nil, nil)
	if err == nil {
		t.Fatal("expected pgsql output merge to fail on column type conflict")
	}
	// The error embeds GetColumnTypeConflictSummary output.
	if !strings.Contains(err.Error(), "column type conflicts detected") {
		t.Fatalf("expected conflict summary in error, got: %v", err)
	}
	if !strings.Contains(err.Error(), "public.users.id") {
		t.Fatalf("expected conflicting column path in error, got: %v", err)
	}
}

View File

@@ -0,0 +1,24 @@
package main
import (
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
)
// newReaderOptions builds the ReaderOptions shared by every reader
// constructor, threading the global prisma7 flag through so Prisma readers
// honor the --prisma7 CLI switch.
func newReaderOptions(filePath, connString string) *readers.ReaderOptions {
	opts := new(readers.ReaderOptions)
	opts.FilePath = filePath
	opts.ConnectionString = connString
	opts.Prisma7 = prisma7
	return opts
}
// newWriterOptions builds the WriterOptions shared by every writer
// constructor, threading the global prisma7 flag through so Prisma writers
// honor the --prisma7 CLI switch.
func newWriterOptions(outputPath, packageName string, flattenSchema bool, nullableTypes string) *writers.WriterOptions {
	opts := new(writers.WriterOptions)
	opts.OutputPath = outputPath
	opts.PackageName = packageName
	opts.FlattenSchema = flattenSchema
	opts.NullableTypes = nullableTypes
	opts.Prisma7 = prisma7
	return opts
}

View File

@@ -12,6 +12,7 @@ var (
// Version information, set via ldflags during build
version = "dev"
buildDate = "unknown"
prisma7 bool
)
func init() {
@@ -68,4 +69,5 @@ func init() {
rootCmd.AddCommand(mergeCmd)
rootCmd.AddCommand(splitCmd)
rootCmd.AddCommand(versionCmd)
rootCmd.PersistentFlags().BoolVar(&prisma7, "prisma7", false, "Use Prisma 7 generator conventions when reading/writing Prisma schemas")
}

View File

@@ -22,6 +22,7 @@ var (
splitDatabaseName string
splitExcludeSchema string
splitExcludeTables string
splitNullableTypes string
)
var splitCmd = &cobra.Command{
@@ -110,6 +111,7 @@ func init() {
splitCmd.Flags().StringVar(&splitTables, "tables", "", "Comma-separated list of table names to include (case-insensitive)")
splitCmd.Flags().StringVar(&splitExcludeSchema, "exclude-schema", "", "Comma-separated list of schema names to exclude")
splitCmd.Flags().StringVar(&splitExcludeTables, "exclude-tables", "", "Comma-separated list of table names to exclude (case-insensitive)")
splitCmd.Flags().StringVar(&splitNullableTypes, "types", "", "Nullable type package for code-gen writers (bun/gorm): 'resolvespec' (default) or 'stdlib' (database/sql)")
err := splitCmd.MarkFlagRequired("from")
if err != nil {
@@ -185,6 +187,7 @@ func runSplit(cmd *cobra.Command, args []string) error {
splitPackageName,
"", // no schema filter for split
false, // no flatten-schema for split
splitNullableTypes,
)
if err != nil {
return fmt.Errorf("failed to write output: %w", err)

View File

@@ -71,6 +71,40 @@ func writeTestJSON(t *testing.T, path string, tableNames []string) {
}
}
// writeTestJSONWithSingleColumnType writes a minimal JSON schema fixture to
// path: one schema ("public") containing one table of the given name whose
// single "id" column (primary key, auto-increment, not null) has the given
// type. Used to build merge inputs that differ only in a column's type.
func writeTestJSONWithSingleColumnType(t *testing.T, path, tableName, columnType string) {
	t.Helper()
	db := minimalDatabase{
		Name: "test_db",
		Schemas: []minimalSchema{{
			Name: "public",
			Tables: []minimalTable{{
				Name:   tableName,
				Schema: "public",
				Columns: map[string]minimalColumn{
					"id": {
						Name:          "id",
						Table:         tableName,
						Schema:        "public",
						Type:          columnType,
						NotNull:       true,
						IsPrimaryKey:  true,
						AutoIncrement: true,
					},
				},
			}},
		}},
	}
	data, err := json.Marshal(db)
	if err != nil {
		t.Fatalf("failed to marshal test JSON: %v", err)
	}
	// 0644: world-readable test fixture; permissions are not under test.
	if err := os.WriteFile(path, data, 0644); err != nil {
		t.Fatalf("failed to write test file %s: %v", path, err)
	}
}
// convertState captures and restores all convert global vars.
type convertState struct {
sourceType string

View File

@@ -1,8 +1,8 @@
# Maintainer: Hein (Warky Devs) <hein@warky.dev>
pkgname=relspec
pkgver=1.0.43
pkgver=1.0.56
pkgrel=1
pkgdesc="Database schema conversion and analysis tool"
pkgdesc="RelSpec is a comprehensive database relations management tool that reads, transforms, and writes database table specifications across multiple formats and ORMs."
arch=('x86_64' 'aarch64')
url="https://git.warky.dev/wdevs/relspecgo"
license=('MIT')

View File

@@ -1,7 +1,7 @@
Name: relspec
Version: 1.0.43
Version: 1.0.56
Release: 1%{?dist}
Summary: Database schema conversion and analysis tool
Summary: RelSpec is a comprehensive database relations management tool that reads, transforms, and writes database table specifications across multiple formats and ORMs.
License: MIT
URL: https://git.warky.dev/wdevs/relspecgo

View File

@@ -22,6 +22,16 @@ type MergeResult struct {
EnumsAdded int
ViewsAdded int
SequencesAdded int
TypeConflicts []ColumnTypeConflict
}
// ColumnTypeConflict describes a column that exists in both schemas but with incompatible types.
type ColumnTypeConflict struct {
Schema string
Table string
Column string
TargetType string
SourceType string
}
// MergeOptions contains options for merge operations
@@ -146,11 +156,19 @@ func (r *MergeResult) mergeColumns(table *models.Table, srcTable *models.Table)
// Merge columns
for colName, srcCol := range srcTable.Columns {
if _, exists := existingColumns[colName]; !exists {
if tgtCol, exists := existingColumns[colName]; !exists {
// Column doesn't exist, add it
newCol := cloneColumn(srcCol)
table.Columns[colName] = newCol
r.ColumnsAdded++
} else if columnTypeConflict(tgtCol, srcCol) {
r.TypeConflicts = append(r.TypeConflicts, ColumnTypeConflict{
Schema: firstNonEmpty(table.Schema, srcTable.Schema, srcCol.Schema),
Table: firstNonEmpty(table.Name, srcTable.Name, srcCol.Table),
Column: firstNonEmpty(tgtCol.Name, srcCol.Name, colName),
TargetType: describeColumnType(tgtCol),
SourceType: describeColumnType(srcCol),
})
}
}
}
@@ -426,6 +444,52 @@ func cloneColumn(col *models.Column) *models.Column {
return newCol
}
// columnTypeConflict reports whether target and source describe the same
// column with incompatible physical types: the normalized base type differs,
// or any of length/precision/scale disagree. Nil columns never conflict.
func columnTypeConflict(target, source *models.Column) bool {
	if target == nil || source == nil {
		return false
	}
	if normalizeType(target.Type) != normalizeType(source.Type) {
		return true
	}
	return target.Length != source.Length ||
		target.Precision != source.Precision ||
		target.Scale != source.Scale
}
// normalizeType canonicalizes a SQL type name for comparison purposes:
// surrounding whitespace is removed and the result is lower-cased.
func normalizeType(value string) string {
	trimmed := strings.TrimSpace(value)
	return strings.ToLower(trimmed)
}
// describeColumnType renders a column's type with its size qualifier where
// one applies, e.g. "varchar(255)" or "numeric(10,2)". Precision (with
// optional scale) takes priority over length. Returns "" for a nil column or
// a column with a blank type name.
func describeColumnType(col *models.Column) string {
	if col == nil {
		return ""
	}
	base := strings.TrimSpace(col.Type)
	if base == "" {
		return ""
	}
	if col.Precision > 0 {
		if col.Scale > 0 {
			return fmt.Sprintf("%s(%d,%d)", base, col.Precision, col.Scale)
		}
		return fmt.Sprintf("%s(%d)", base, col.Precision)
	}
	if col.Length > 0 {
		return fmt.Sprintf("%s(%d)", base, col.Length)
	}
	return base
}
// firstNonEmpty returns the first argument containing non-whitespace content,
// unmodified (not trimmed), or "" when every value is blank.
func firstNonEmpty(values ...string) string {
	for _, candidate := range values {
		if strings.TrimSpace(candidate) == "" {
			continue
		}
		return candidate
	}
	return ""
}
func cloneConstraint(constraint *models.Constraint) *models.Constraint {
if constraint == nil {
return nil
@@ -609,6 +673,7 @@ func GetMergeSummary(result *MergeResult) string {
fmt.Sprintf("Enums added: %d", result.EnumsAdded),
fmt.Sprintf("Relations added: %d", result.RelationsAdded),
fmt.Sprintf("Domains added: %d", result.DomainsAdded),
fmt.Sprintf("Type conflicts: %d", len(result.TypeConflicts)),
}
totalAdded := result.SchemasAdded + result.TablesAdded + result.ColumnsAdded +
@@ -625,3 +690,35 @@ func GetMergeSummary(result *MergeResult) string {
return summary
}
// GetColumnTypeConflictSummary returns a short, human-readable conflict summary.
// At most limit conflicts are listed (limit <= 0 means "all"); any remainder
// is collapsed into a trailing "... and N more" line. Returns "" when there
// are no conflicts to report.
func GetColumnTypeConflictSummary(result *MergeResult, limit int) string {
	if result == nil || len(result.TypeConflicts) == 0 {
		return ""
	}
	total := len(result.TypeConflicts)
	if limit <= 0 {
		limit = total
	}
	shown := min(limit, total)
	lines := make([]string, 0, shown+2)
	lines = append(lines, "column type conflicts detected:")
	for _, conflict := range result.TypeConflicts[:shown] {
		lines = append(lines, fmt.Sprintf(" - %s.%s.%s: target=%s source=%s",
			conflict.Schema, conflict.Table, conflict.Column, conflict.TargetType, conflict.SourceType))
	}
	if total > limit {
		lines = append(lines, fmt.Sprintf(" ... and %d more", total-limit))
	}
	return strings.Join(lines, "\n")
}
// min returns the smaller of a and b.
// NOTE(review): Go 1.21+ provides a built-in min; this helper can be dropped
// once the module's minimum Go version allows it.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}

View File

@@ -1,6 +1,7 @@
package merge
import (
"strings"
"testing"
"git.warky.dev/wdevs/relspecgo/pkg/models"
@@ -140,6 +141,61 @@ func TestMergeColumns_NewColumn(t *testing.T) {
}
}
// TestMergeColumns_TypeConflictIsDetected merges two schemas whose shared
// "users.email" column differs in type (varchar(255) vs text) and verifies
// that exactly one conflict is recorded — with qualified types — and that the
// target column's type is left untouched.
func TestMergeColumns_TypeConflictIsDetected(t *testing.T) {
	target := &models.Database{
		Schemas: []*models.Schema{
			{
				Name: "public",
				Tables: []*models.Table{
					{
						Name:   "users",
						Schema: "public",
						Columns: map[string]*models.Column{
							"email": {Name: "email", Type: "varchar", Length: 255},
						},
					},
				},
			},
		},
	}
	source := &models.Database{
		Schemas: []*models.Schema{
			{
				Name: "public",
				Tables: []*models.Table{
					{
						Name:   "users",
						Schema: "public",
						Columns: map[string]*models.Column{
							"email": {Name: "email", Type: "text"},
						},
					},
				},
			},
		},
	}
	result := MergeDatabases(target, source, nil)
	// Exactly one conflict, located at public.users.email.
	if len(result.TypeConflicts) != 1 {
		t.Fatalf("Expected 1 type conflict, got %d", len(result.TypeConflicts))
	}
	conflict := result.TypeConflicts[0]
	if conflict.Schema != "public" || conflict.Table != "users" || conflict.Column != "email" {
		t.Fatalf("Unexpected conflict location: %+v", conflict)
	}
	// Conflict types are reported with their size qualifiers where present.
	if conflict.TargetType != "varchar(255)" {
		t.Fatalf("Expected target type varchar(255), got %q", conflict.TargetType)
	}
	if conflict.SourceType != "text" {
		t.Fatalf("Expected source type text, got %q", conflict.SourceType)
	}
	// The merge must not overwrite the target's type on conflict.
	if got := target.Schemas[0].Tables[0].Columns["email"].Type; got != "varchar" {
		t.Fatalf("Expected target column type to remain unchanged, got %q", got)
	}
}
func TestMergeConstraints_NewConstraint(t *testing.T) {
target := &models.Database{
Schemas: []*models.Schema{
@@ -509,6 +565,9 @@ func TestGetMergeSummary(t *testing.T) {
ConstraintsAdded: 3,
IndexesAdded: 2,
ViewsAdded: 1,
TypeConflicts: []ColumnTypeConflict{
{Schema: "public", Table: "users", Column: "email", TargetType: "varchar(255)", SourceType: "text"},
},
}
summary := GetMergeSummary(result)
@@ -518,6 +577,9 @@ func TestGetMergeSummary(t *testing.T) {
if len(summary) < 50 {
t.Errorf("Summary seems too short: %s", summary)
}
if !strings.Contains(summary, "Type conflicts: 1") {
t.Errorf("Expected type conflict count in summary, got: %s", summary)
}
}
func TestGetMergeSummary_Nil(t *testing.T) {

85
pkg/pgsql/connection.go Normal file
View File

@@ -0,0 +1,85 @@
package pgsql
import (
"context"
"fmt"
"runtime/debug"
"strings"
"github.com/jackc/pgx/v5"
)
const (
defaultApplicationPrefix = "relspecgo"
postgresIdentifierMaxLen = 63
)
// BuildApplicationName returns a PostgreSQL application_name in the form:
// relspecgo/<version>[:<component>]
func BuildApplicationName(component string) string {
	name := fmt.Sprintf("%s/%s", defaultApplicationPrefix, relspecVersion())
	if trimmed := strings.TrimSpace(component); trimmed != "" {
		name = fmt.Sprintf("%s:%s", name, trimmed)
	}
	// PostgreSQL truncates identifiers beyond 63 bytes; pre-truncate so the
	// name we report matches what the server stores.
	if len(name) > postgresIdentifierMaxLen {
		return name[:postgresIdentifierMaxLen]
	}
	return name
}
// ParseConfigWithApplicationName parses a connection string and applies a default
// application_name when one is not explicitly provided by the caller.
func ParseConfigWithApplicationName(connString, component string) (*pgx.ConnConfig, error) {
	cfg, err := pgx.ParseConfig(connString)
	if err != nil {
		return nil, err
	}
	params := cfg.RuntimeParams
	if params == nil {
		params = map[string]string{}
		cfg.RuntimeParams = params
	}
	// Respect an explicit application_name from the DSN; only fill in the
	// relspec default when the caller left it empty or whitespace.
	if strings.TrimSpace(params["application_name"]) == "" {
		params["application_name"] = BuildApplicationName(component)
	}
	return cfg, nil
}
// Connect establishes a PostgreSQL connection with a default relspec
// application_name when the caller does not provide one in the DSN.
func Connect(ctx context.Context, connString, component string) (*pgx.Conn, error) {
	cfg, cfgErr := ParseConfigWithApplicationName(connString, component)
	if cfgErr != nil {
		return nil, cfgErr
	}
	// Defaulted application_name travels with the config into the session.
	return pgx.ConnectConfig(ctx, cfg)
}
// relspecVersion derives a display version for the running binary from its
// embedded build info: the module version when available, otherwise a short
// VCS revision, otherwise "dev".
func relspecVersion() string {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		return "dev"
	}
	if v := strings.TrimSpace(info.Main.Version); v != "" && v != "(devel)" {
		return v
	}
	for _, setting := range info.Settings {
		if setting.Key != "vcs.revision" {
			continue
		}
		rev := strings.TrimSpace(setting.Value)
		switch {
		case len(rev) >= 7:
			// Abbreviate like a short git hash.
			return rev[:7]
		case rev != "":
			return rev
		}
	}
	return "dev"
}

View File

@@ -0,0 +1,53 @@
package pgsql
import (
"strings"
"testing"
)
// TestBuildApplicationName_IncludesVersion checks that the generated name
// always carries the fixed "relspecgo/" prefix, even with no component.
func TestBuildApplicationName_IncludesVersion(t *testing.T) {
	got := BuildApplicationName("")
	if !strings.HasPrefix(got, "relspecgo/") {
		t.Fatalf("BuildApplicationName() = %q, expected prefix relspecgo/", got)
	}
}
// TestBuildApplicationName_IncludesComponent checks that a non-empty
// component is appended after a ":" separator.
func TestBuildApplicationName_IncludesComponent(t *testing.T) {
	got := BuildApplicationName("reader-pgsql")
	if !strings.Contains(got, ":reader-pgsql") {
		t.Fatalf("BuildApplicationName(component) = %q, expected component suffix", got)
	}
}
// TestBuildApplicationName_RespectsPostgresLengthLimit checks that overly
// long names are truncated to PostgreSQL's 63-byte identifier limit.
func TestBuildApplicationName_RespectsPostgresLengthLimit(t *testing.T) {
	got := BuildApplicationName(strings.Repeat("x", 200))
	if len(got) > 63 {
		t.Fatalf("BuildApplicationName() length = %d, expected <= 63", len(got))
	}
}
// A DSN without application_name gets the relspec default injected.
func TestParseConfigWithApplicationName_AddsWhenMissing(t *testing.T) {
	cfg, err := ParseConfigWithApplicationName("postgres://user:pass@localhost:5432/db", "reader-pgsql")
	if err != nil {
		t.Fatalf("ParseConfigWithApplicationName() error = %v", err)
	}
	name := cfg.RuntimeParams["application_name"]
	if name == "" {
		t.Fatal("expected application_name to be set")
	}
	if !strings.HasPrefix(name, "relspecgo/") {
		t.Fatalf("application_name = %q, expected relspecgo/<version> prefix", name)
	}
}
// An explicit application_name in the DSN must win over the default.
func TestParseConfigWithApplicationName_PreservesExplicitValue(t *testing.T) {
	cfg, err := ParseConfigWithApplicationName("postgres://user:pass@localhost:5432/db?application_name=custom-app", "reader-pgsql")
	if err != nil {
		t.Fatalf("ParseConfigWithApplicationName() error = %v", err)
	}
	if name := cfg.RuntimeParams["application_name"]; name != "custom-app" {
		t.Fatalf("application_name = %q, expected %q", name, "custom-app")
	}
}

348
pkg/pgsql/types_registry.go Normal file
View File

@@ -0,0 +1,348 @@
package pgsql
import (
"sort"
"strings"
)
// TypeSpec describes PostgreSQL type capabilities used by parsers/writers.
type TypeSpec struct {
	// SupportsLength is true for types that accept a single length/dimension
	// modifier, e.g. varchar(255) or bit(8).
	SupportsLength bool
	// SupportsPrecision is true for types that accept a precision (and
	// optionally scale) modifier, e.g. numeric(10,2) or timestamp(3).
	SupportsPrecision bool
}

// postgresBaseTypes registers every base type name the tooling recognizes,
// keyed by its lowercase canonical spelling. Lookups in this map go through
// CanonicalizeBaseType first, so alias spellings resolve before the check.
var postgresBaseTypes = map[string]TypeSpec{
	// Numeric types
	"smallint":         {},
	"integer":          {},
	"bigint":           {},
	"decimal":          {SupportsPrecision: true},
	"numeric":          {SupportsPrecision: true},
	"real":             {},
	"double precision": {},
	"smallserial":      {},
	"serial":           {},
	"bigserial":        {},
	"money":            {},
	// Character types
	"char":              {SupportsLength: true},
	"character":         {SupportsLength: true},
	"varchar":           {SupportsLength: true},
	"character varying": {SupportsLength: true},
	"text":              {},
	"name":              {},
	// Binary
	"bytea": {},
	// Date/time
	"timestamp":                   {SupportsPrecision: true},
	"timestamp without time zone": {SupportsPrecision: true},
	"timestamp with time zone":    {SupportsPrecision: true},
	"time":                        {SupportsPrecision: true},
	"time without time zone":      {SupportsPrecision: true},
	"time with time zone":         {SupportsPrecision: true},
	"date":                        {},
	"interval":                    {SupportsPrecision: true},
	// Boolean
	"boolean": {},
	// Geometric
	"point":   {},
	"line":    {},
	"lseg":    {},
	"box":     {},
	"path":    {},
	"polygon": {},
	"circle":  {},
	// Network
	"cidr":     {},
	"inet":     {},
	"macaddr":  {},
	"macaddr8": {},
	// Bit string
	"bit":         {SupportsLength: true},
	"bit varying": {SupportsLength: true},
	// NOTE(review): "varbit" is also declared as an alias of "bit varying" in
	// postgresTypeAliases below; CanonicalizeBaseType resolves the alias first,
	// so this entry only affects the GetPostgresBaseTypes listing.
	"varbit": {SupportsLength: true},
	// Text search
	"tsvector": {},
	"tsquery":  {},
	// UUID/XML/JSON
	"uuid":  {},
	"xml":   {},
	"json":  {},
	"jsonb": {},
	// Range
	"int4range":      {},
	"int8range":      {},
	"numrange":       {},
	"tsrange":        {},
	"tstzrange":      {},
	"daterange":      {},
	"int4multirange": {},
	"int8multirange": {},
	"nummultirange":  {},
	"tsmultirange":   {},
	"tstzmultirange": {},
	"datemultirange": {},
	// Object identifier
	"oid":      {},
	"regclass": {},
	"regproc":  {},
	"regtype":  {},
	// Pseudo-ish/common built-ins seen in schemas
	"record": {},
	"void":   {},
	// Common extensions
	"citext":    {},
	"hstore":    {},
	"ltree":     {},
	"lquery":    {},
	"ltxtquery": {},
	"vector":    {}, // pgvector: keep explicit modifier form (vector(dim))
	"halfvec":   {}, // pgvector: keep explicit modifier form (halfvec(dim))
	"sparsevec": {}, // pgvector: keep explicit modifier form (sparsevec(dim))
}

// postgresTypeAliases maps alternate lowercase spellings onto the canonical
// base type name used as a key in postgresBaseTypes.
var postgresTypeAliases = map[string]string{
	// Integer aliases
	"int2": "smallint",
	"int4": "integer",
	"int8": "bigint",
	"int":  "integer",
	// Serial aliases
	"serial2": "smallserial",
	"serial4": "serial",
	"serial8": "bigserial",
	// Character aliases
	"bpchar": "char",
	// Float aliases
	"float4": "real",
	"float8": "double precision",
	"float":  "double precision",
	// Time aliases
	"timestamptz": "timestamp with time zone",
	"timetz":      "time with time zone",
	// Bit alias
	"varbit": "bit varying",
	// Boolean alias
	"bool": "boolean",
}

// postgresEquivalentBaseTypes folds SQL-standard long spellings onto the
// short form used when comparing types for equivalence.
var postgresEquivalentBaseTypes = map[string]string{
	"character varying":           "varchar",
	"character":                   "char",
	"timestamp without time zone": "timestamp",
	"timestamp with time zone":    "timestamptz",
	"time without time zone":      "time",
	"time with time zone":         "timetz",
}

// postgresEquivalentBaseTypeVariants maps each short equivalence form back to
// every accepted spelling (inverse of postgresEquivalentBaseTypes, with the
// short form listed first).
var postgresEquivalentBaseTypeVariants = map[string][]string{
	"varchar":     {"varchar", "character varying"},
	"char":        {"char", "character"},
	"timestamp":   {"timestamp", "timestamp without time zone"},
	"timestamptz": {"timestamptz", "timestamp with time zone"},
	"time":        {"time", "time without time zone"},
	"timetz":      {"timetz", "time with time zone"},
}
// GetPostgresBaseTypes returns the registered base type names, sorted
// alphabetically for deterministic output.
func GetPostgresBaseTypes() []string {
	names := make([]string, 0, len(postgresBaseTypes))
	for name := range postgresBaseTypes {
		names = append(names, name)
	}
	sort.Strings(names)
	return names
}
// GetPostgresTypes returns the registered PostgreSQL types. When includeArrays
// is true the result also contains an array spelling ("type[]") for every
// base type, appended after the base list.
func GetPostgresTypes(includeArrays bool) []string {
	names := GetPostgresBaseTypes()
	if !includeArrays {
		return names
	}
	all := make([]string, 0, 2*len(names))
	all = append(all, names...)
	for _, name := range names {
		all = append(all, name+"[]")
	}
	return all
}
// ExtractBaseType returns the type without outer array suffixes and modifiers.
// Examples:
// - varchar(255) -> varchar
// - text[] -> text
// - numeric(10,2)[] -> numeric
func ExtractBaseType(sqlType string) string {
t := normalizeTypeToken(sqlType)
t = strings.TrimSpace(stripArraySuffixes(t))
if idx := strings.Index(t, "("); idx > 0 {
t = strings.TrimSpace(t[:idx])
}
return t
}
// ExtractBaseTypeLower is ExtractBaseType folded to lowercase.
func ExtractBaseTypeLower(sqlType string) string {
	base := ExtractBaseType(sqlType)
	return strings.ToLower(base)
}
// IsArrayType reports whether the SQL type ends in one or more [] suffixes
// (after whitespace normalization).
func IsArrayType(sqlType string) bool {
	return strings.HasSuffix(normalizeTypeToken(sqlType), "[]")
}
// ElementType returns the underlying element type for array types.
// For non-array types, it returns the input unchanged.
func ElementType(sqlType string) string {
t := normalizeTypeToken(sqlType)
return stripArraySuffixes(t)
}
// CanonicalizeBaseType lowercases the base type and resolves registered
// aliases (e.g. int4 -> integer) to canonical PostgreSQL names.
func CanonicalizeBaseType(baseType string) string {
	lowered := strings.ToLower(normalizeTypeToken(baseType))
	canonical, isAlias := postgresTypeAliases[lowered]
	if isAlias {
		return canonical
	}
	return lowered
}
// EquivalentBaseType maps SQL-standard spellings (e.g. "character varying")
// onto the short comparable form (e.g. "varchar") after canonicalization.
func EquivalentBaseType(baseType string) string {
	canonical := CanonicalizeBaseType(baseType)
	equivalent, found := postgresEquivalentBaseTypes[canonical]
	if found {
		return equivalent
	}
	return canonical
}
// NormalizeEquivalentSQLType returns a normalized SQL type string suitable for
// equality checks. Equivalent spellings such as "character varying(255)" and
// "varchar(255)" normalize identically. Whitespace inside the "(...)" modifier
// is removed so "numeric(10, 2)" and "numeric(10,2)" also compare equal; array
// suffixes are preserved.
func NormalizeEquivalentSQLType(sqlType string) string {
	t := normalizeTypeToken(sqlType)
	if t == "" {
		return ""
	}
	// Peel off trailing array suffixes, counting depth so they can be
	// reattached after the base type is normalized.
	arrayDepth := 0
	for strings.HasSuffix(t, "[]") {
		arrayDepth++
		t = strings.TrimSpace(strings.TrimSuffix(t, "[]"))
	}
	modifier := ""
	if idx := strings.Index(t, "("); idx >= 0 {
		// Drop internal whitespace so spacing differences inside the modifier
		// cannot defeat the equality check this function exists to enable.
		modifier = strings.ReplaceAll(strings.TrimSpace(t[idx:]), " ", "")
		t = strings.TrimSpace(t[:idx])
	}
	normalized := EquivalentBaseType(t) + modifier
	normalized += strings.Repeat("[]", arrayDepth)
	return normalized
}
// EquivalentSQLTypeVariants returns the equivalent PostgreSQL spellings for a
// SQL type, preserving any "(...)" modifier (with internal whitespace removed,
// matching NormalizeEquivalentSQLType) and array suffixes.
//
// Examples:
//   - varchar(255) -> ["varchar(255)", "character varying(255)"]
//   - timestamptz -> ["timestamptz", "timestamp with time zone"]
func EquivalentSQLTypeVariants(sqlType string) []string {
	t := normalizeTypeToken(sqlType)
	if t == "" {
		return nil
	}
	// Peel off trailing array suffixes so variants are computed on the base.
	arrayDepth := 0
	for strings.HasSuffix(t, "[]") {
		arrayDepth++
		t = strings.TrimSpace(strings.TrimSuffix(t, "[]"))
	}
	modifier := ""
	if idx := strings.Index(t, "("); idx >= 0 {
		// Keep modifier spelling consistent with NormalizeEquivalentSQLType.
		modifier = strings.ReplaceAll(strings.TrimSpace(t[idx:]), " ", "")
		t = strings.TrimSpace(t[:idx])
	}
	base := EquivalentBaseType(t)
	bases := postgresEquivalentBaseTypeVariants[base]
	if len(bases) == 0 {
		bases = []string{base}
	}
	suffix := modifier + strings.Repeat("[]", arrayDepth)
	seen := make(map[string]bool, len(bases))
	result := make([]string, 0, len(bases))
	for _, variantBase := range bases {
		variant := variantBase + suffix
		if seen[variant] {
			continue
		}
		seen[variant] = true
		result = append(result, variant)
	}
	return result
}
// IsKnownPostgresType reports whether the type (array and modifier forms
// included) resolves to a registered base type.
func IsKnownPostgresType(sqlType string) bool {
	canonical := CanonicalizeBaseType(ExtractBaseTypeLower(sqlType))
	_, known := postgresBaseTypes[canonical]
	return known
}
// SupportsLength reports if this SQL type accepts a single length/dimension modifier.
func SupportsLength(sqlType string) bool {
base := CanonicalizeBaseType(ExtractBaseTypeLower(sqlType))
spec, ok := postgresBaseTypes[base]
return ok && spec.SupportsLength
}
// SupportsPrecision reports if this SQL type accepts precision (and possibly scale).
func SupportsPrecision(sqlType string) bool {
base := CanonicalizeBaseType(ExtractBaseTypeLower(sqlType))
spec, ok := postgresBaseTypes[base]
return ok && spec.SupportsPrecision
}
// HasExplicitTypeModifier reports whether the raw type string already carries
// an opening "(" and therefore an explicit "(...)" modifier.
func HasExplicitTypeModifier(sqlType string) bool {
	return strings.ContainsRune(sqlType, '(')
}
// stripArraySuffixes removes every trailing "[]" pair, trimming whitespace
// left behind between suffixes.
func stripArraySuffixes(t string) string {
	for strings.HasSuffix(t, "[]") {
		t = strings.TrimSpace(t[:len(t)-2])
	}
	return t
}
// normalizeTypeToken trims the input and collapses every internal whitespace
// run into a single space.
func normalizeTypeToken(t string) string {
	words := strings.Fields(t)
	return strings.Join(words, " ")
}

View File

@@ -0,0 +1,147 @@
package pgsql
import "testing"
// The master type list must include these types and their array variants.
func TestPostgresTypeRegistry_MasterListIncludesRequestedTypes(t *testing.T) {
	required := []string{
		"vector",
		"integer",
		"citext",
	}
	registered := make(map[string]bool)
	for _, name := range GetPostgresTypes(true) {
		registered[name] = true
	}
	for _, name := range required {
		if !registered[name] {
			t.Fatalf("master type list missing %q", name)
		}
		if !registered[name+"[]"] {
			t.Fatalf("master type list missing array variant %q", name+"[]")
		}
	}
}
// Table test covering base-type extraction, canonicalization, array
// detection, registry membership, and modifier capabilities.
func TestPostgresTypeRegistry_TypeParsingAndCapabilities(t *testing.T) {
	cases := []struct {
		input             string
		wantBase          string
		wantCanonicalBase string
		wantArray         bool
		wantKnown         bool
		wantLength        bool
		wantPrecision     bool
	}{
		{input: "integer[]", wantBase: "integer", wantCanonicalBase: "integer", wantArray: true, wantKnown: true},
		{input: "citext[]", wantBase: "citext", wantCanonicalBase: "citext", wantArray: true, wantKnown: true},
		{input: "vector(1536)", wantBase: "vector", wantCanonicalBase: "vector", wantKnown: true, wantLength: false},
		{input: "numeric(10,2)", wantBase: "numeric", wantCanonicalBase: "numeric", wantKnown: true, wantPrecision: true},
		{input: "int4", wantBase: "int4", wantCanonicalBase: "integer", wantKnown: true},
	}
	for _, tc := range cases {
		t.Run(tc.input, func(t *testing.T) {
			base := ExtractBaseTypeLower(tc.input)
			if base != tc.wantBase {
				t.Fatalf("ExtractBaseTypeLower(%q) = %q, want %q", tc.input, base, tc.wantBase)
			}
			canonical := CanonicalizeBaseType(base)
			if canonical != tc.wantCanonicalBase {
				t.Fatalf("CanonicalizeBaseType(%q) = %q, want %q", base, canonical, tc.wantCanonicalBase)
			}
			if gotArray := IsArrayType(tc.input); gotArray != tc.wantArray {
				t.Fatalf("IsArrayType(%q) = %v, want %v", tc.input, gotArray, tc.wantArray)
			}
			if gotKnown := IsKnownPostgresType(tc.input); gotKnown != tc.wantKnown {
				t.Fatalf("IsKnownPostgresType(%q) = %v, want %v", tc.input, gotKnown, tc.wantKnown)
			}
			if gotLength := SupportsLength(tc.input); gotLength != tc.wantLength {
				t.Fatalf("SupportsLength(%q) = %v, want %v", tc.input, gotLength, tc.wantLength)
			}
			if gotPrecision := SupportsPrecision(tc.input); gotPrecision != tc.wantPrecision {
				t.Fatalf("SupportsPrecision(%q) = %v, want %v", tc.input, gotPrecision, tc.wantPrecision)
			}
		})
	}
}
// Equivalent spellings must normalize to the same comparable string.
func TestNormalizeEquivalentSQLType(t *testing.T) {
	cases := []struct {
		input string
		want  string
	}{
		{input: "character varying(255)", want: "varchar(255)"},
		{input: "varchar(255)", want: "varchar(255)"},
		{input: "timestamp with time zone", want: "timestamptz"},
		{input: "timestamptz", want: "timestamptz"},
		{input: "time without time zone", want: "time"},
		{input: "character varying(255)[]", want: "varchar(255)[]"},
	}
	for _, tc := range cases {
		t.Run(tc.input, func(t *testing.T) {
			if got := NormalizeEquivalentSQLType(tc.input); got != tc.want {
				t.Fatalf("NormalizeEquivalentSQLType(%q) = %q, want %q", tc.input, got, tc.want)
			}
		})
	}
}
// Variant expansion must yield every equivalent spelling, in order.
func TestEquivalentSQLTypeVariants(t *testing.T) {
	cases := []struct {
		input string
		want  []string
	}{
		{input: "character varying(255)", want: []string{"varchar(255)", "character varying(255)"}},
		{input: "timestamptz", want: []string{"timestamptz", "timestamp with time zone"}},
		{input: "text[]", want: []string{"text[]"}},
	}
	for _, tc := range cases {
		t.Run(tc.input, func(t *testing.T) {
			got := EquivalentSQLTypeVariants(tc.input)
			if len(got) != len(tc.want) {
				t.Fatalf("EquivalentSQLTypeVariants(%q) len = %d, want %d (%v)", tc.input, len(got), len(tc.want), got)
			}
			for i := range tc.want {
				if got[i] != tc.want[i] {
					t.Fatalf("EquivalentSQLTypeVariants(%q)[%d] = %q, want %q", tc.input, i, got[i], tc.want[i])
				}
			}
		})
	}
}

View File

@@ -12,6 +12,7 @@ import (
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
)
@@ -700,16 +701,22 @@ func (r *Reader) extractBunTag(tag string) string {
// parseTypeWithLength parses a type string and extracts length if present
// e.g., "varchar(255)" returns ("varchar", 255)
func (r *Reader) parseTypeWithLength(typeStr string) (baseType string, length int) {
typeStr = strings.TrimSpace(typeStr)
baseType = typeStr
// Check for type with length: varchar(255), char(10), etc.
re := regexp.MustCompile(`^([a-zA-Z\s]+)\((\d+)\)$`)
matches := re.FindStringSubmatch(typeStr)
if len(matches) == 3 {
rawBaseType := strings.TrimSpace(matches[1])
if pgsql.SupportsLength(rawBaseType) {
if _, err := fmt.Sscanf(matches[2], "%d", &length); err == nil {
baseType = strings.TrimSpace(matches[1])
baseType = pgsql.CanonicalizeBaseType(rawBaseType)
return
}
}
baseType = typeStr
}
return
}

View File

@@ -71,8 +71,11 @@ func TestReader_ReadDatabase_Simple(t *testing.T) {
if !emailCol.NotNull {
t.Error("Column 'email' should be NOT NULL (explicit 'notnull' tag)")
}
if emailCol.Type != "varchar" || emailCol.Length != 255 {
t.Errorf("Expected email type 'varchar(255)', got '%s' with length %d", emailCol.Type, emailCol.Length)
if emailCol.Type != "varchar" && emailCol.Type != "varchar(255)" {
t.Errorf("Expected email type 'varchar' or 'varchar(255)', got '%s' with length %d", emailCol.Type, emailCol.Length)
}
if emailCol.Length != 255 {
t.Errorf("Expected email length 255, got %d", emailCol.Length)
}
// Verify name column - primitive string type should be NOT NULL by default in Bun
@@ -356,6 +359,33 @@ func TestReader_ReadDatabase_Complex(t *testing.T) {
}
}
// Length-capable types split into (base, length); custom or precision-bearing
// modifiers (vector(1536), numeric(10,2)) must pass through untouched.
func TestParseTypeWithLength_PreservesExplicitTypeModifiers(t *testing.T) {
	reader := &Reader{}
	cases := []struct {
		input      string
		wantType   string
		wantLength int
	}{
		{"varchar(255)", "varchar", 255},
		{"character varying(120)", "character varying", 120},
		{"vector(1536)", "vector(1536)", 0},
		{"numeric(10,2)", "numeric(10,2)", 0},
	}
	for _, tc := range cases {
		t.Run(tc.input, func(t *testing.T) {
			gotType, gotLength := reader.parseTypeWithLength(tc.input)
			if gotType != tc.wantType {
				t.Fatalf("parseTypeWithLength(%q) type = %q, want %q", tc.input, gotType, tc.wantType)
			}
			if gotLength != tc.wantLength {
				t.Fatalf("parseTypeWithLength(%q) length = %d, want %d", tc.input, gotLength, tc.wantLength)
			}
		})
	}
}
func TestReader_ReadSchema(t *testing.T) {
opts := &readers.ReaderOptions{
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "bun", "simple.go"),

View File

@@ -567,25 +567,20 @@ func (r *Reader) parseDBML(content string) (*models.Database, error) {
// parseColumn parses a DBML column definition
func (r *Reader) parseColumn(line, tableName, schemaName string) (*models.Column, *models.Constraint) {
// Format: column_name type [attributes] // comment
parts := strings.Fields(line)
if len(parts) < 2 {
lineNoComment, inlineComment := splitInlineComment(line)
signature, attrs := splitColumnSignatureAndAttrs(lineNoComment)
columnName, columnType, ok := parseColumnSignature(signature)
if !ok {
return nil, nil
}
columnName := stripQuotes(parts[0])
columnType := stripQuotes(parts[1])
column := models.InitColumn(columnName, tableName, schemaName)
column.Type = columnType
var constraint *models.Constraint
// Parse attributes in brackets
if strings.Contains(line, "[") && strings.Contains(line, "]") {
attrStart := strings.Index(line, "[")
attrEnd := strings.Index(line, "]")
if attrStart < attrEnd {
attrs := line[attrStart+1 : attrEnd]
if attrs != "" {
attrList := strings.Split(attrs, ",")
for _, attr := range attrList {
@@ -660,17 +655,94 @@ func (r *Reader) parseColumn(line, tableName, schemaName string) (*models.Column
}
}
}
}
// Parse inline comment
if strings.Contains(line, "//") {
commentStart := strings.Index(line, "//")
column.Comment = strings.TrimSpace(line[commentStart+2:])
if inlineComment != "" {
column.Comment = inlineComment
}
return column, constraint
}
// splitInlineComment separates a DBML line from its trailing "//" comment.
// Without a marker the line is returned unchanged with an empty comment;
// otherwise both halves are whitespace-trimmed.
func splitInlineComment(line string) (content string, inlineComment string) {
	marker := strings.Index(line, "//")
	if marker < 0 {
		return line, ""
	}
	content = strings.TrimSpace(line[:marker])
	inlineComment = strings.TrimSpace(line[marker+2:])
	return content, inlineComment
}
// splitColumnSignatureAndAttrs splits a DBML column line into the "name type"
// signature and the contents of a trailing [ ... ] attribute block, if any.
// The attribute block must be preceded by whitespace so array types such as
// text[] are never mistaken for attributes.
func splitColumnSignatureAndAttrs(line string) (signature string, attrs string) {
	src := strings.TrimSpace(line)
	if src == "" || !strings.HasSuffix(src, "]") {
		return src, ""
	}
	depth := 0
	for pos := len(src) - 1; pos >= 0; pos-- {
		ch := src[pos]
		if ch == ']' {
			depth++
			continue
		}
		if ch != '[' {
			continue
		}
		depth--
		if depth != 0 {
			continue
		}
		// Matched the opening bracket of the trailing block: it only counts
		// as an attribute block when a space or tab precedes it.
		if pos > 0 && (src[pos-1] == ' ' || src[pos-1] == '\t') {
			return strings.TrimSpace(src[:pos]), strings.TrimSpace(src[pos+1 : len(src)-1])
		}
	}
	return src, ""
}
// parseColumnSignature splits "name type" where the name may be single- or
// double-quoted and the type may span multiple words (e.g. "timestamp with
// time zone"). ok is false when either part is missing.
func parseColumnSignature(signature string) (columnName string, columnType string, ok bool) {
	signature = strings.TrimSpace(signature)
	if signature == "" {
		return "", "", false
	}
	var nameEnd int
	if first := signature[0]; first == '"' || first == '\'' {
		// Quoted name: consume up to and including the closing quote.
		nameEnd = 1
		for nameEnd < len(signature) {
			closed := signature[nameEnd] == first
			nameEnd++
			if closed {
				break
			}
		}
	} else {
		// Bare name: consume until the first space or tab.
		for nameEnd < len(signature) && signature[nameEnd] != ' ' && signature[nameEnd] != '\t' {
			nameEnd++
		}
	}
	if nameEnd <= 0 || nameEnd >= len(signature) {
		return "", "", false
	}
	columnName = stripQuotes(strings.TrimSpace(signature[:nameEnd]))
	columnType = stripWrappingQuotes(strings.TrimSpace(signature[nameEnd:]))
	if columnName == "" || columnType == "" {
		return "", "", false
	}
	return columnName, columnType, true
}
// stripWrappingQuotes removes one pair of matching outer quotes (single or
// double) from the trimmed input; anything else is returned trimmed only.
func stripWrappingQuotes(s string) string {
	s = strings.TrimSpace(s)
	if len(s) < 2 {
		return s
	}
	first, last := s[0], s[len(s)-1]
	if first == last && (first == '"' || first == '\'') {
		return s[1 : len(s)-1]
	}
	return s
}
// parseIndex parses a DBML index definition
func (r *Reader) parseIndex(line, tableName, schemaName string) *models.Index {
// Format: (columns) [attributes] OR columnname [attributes]
@@ -832,7 +904,11 @@ func (r *Reader) parseRef(refStr string) *models.Constraint {
for _, action := range actionList {
action = strings.TrimSpace(action)
if strings.HasPrefix(action, "ondelete:") {
if strings.HasPrefix(action, "delete:") {
constraint.OnDelete = strings.TrimSpace(strings.TrimPrefix(action, "delete:"))
} else if strings.HasPrefix(action, "update:") {
constraint.OnUpdate = strings.TrimSpace(strings.TrimPrefix(action, "update:"))
} else if strings.HasPrefix(action, "ondelete:") {
constraint.OnDelete = strings.TrimSpace(strings.TrimPrefix(action, "ondelete:"))
} else if strings.HasPrefix(action, "onupdate:") {
constraint.OnUpdate = strings.TrimSpace(strings.TrimPrefix(action, "onupdate:"))

View File

@@ -839,6 +839,67 @@ func TestConstraintNaming(t *testing.T) {
}
}
// parseColumn must keep PostgreSQL array types, custom modifiers, and
// multi-word type names intact while still honoring attrs and comments.
func TestParseColumn_PostgresTypes(t *testing.T) {
	reader := &Reader{}
	cases := []struct {
		name        string
		line        string
		wantName    string
		wantType    string
		wantNotNull bool
		wantComment string
	}{
		{name: "array type with attrs", line: "tags text[] [not null]", wantName: "tags", wantType: "text[]", wantNotNull: true},
		{name: "vector with dimension", line: "embedding vector(1536)", wantName: "embedding", wantType: "vector(1536)"},
		{name: "multi word timestamp type", line: "published_at timestamp with time zone", wantName: "published_at", wantType: "timestamp with time zone"},
		{name: "array type with inline comment", line: "labels varchar(20)[] // column labels", wantName: "labels", wantType: "varchar(20)[]", wantComment: "column labels"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			column, _ := reader.parseColumn(tc.line, "events", "public")
			if column == nil {
				t.Fatalf("parseColumn() returned nil column")
			}
			if column.Name != tc.wantName {
				t.Errorf("column name = %q, want %q", column.Name, tc.wantName)
			}
			if column.Type != tc.wantType {
				t.Errorf("column type = %q, want %q", column.Type, tc.wantType)
			}
			if column.NotNull != tc.wantNotNull {
				t.Errorf("column not null = %v, want %v", column.NotNull, tc.wantNotNull)
			}
			if column.Comment != tc.wantComment {
				t.Errorf("column comment = %q, want %q", column.Comment, tc.wantComment)
			}
		})
	}
}
func getKeys[V any](m map[string]V) []string {
keys := make([]string, 0, len(m))
for k := range m {

View File

@@ -7,6 +7,7 @@ import (
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
)
@@ -232,7 +233,19 @@ func (r *Reader) convertField(dctxField *models.DCTXField, tableName string) ([]
// mapDataType maps Clarion data types to SQL types
func (r *Reader) mapDataType(clarionType string, size int) (sqlType string, precision int) {
switch strings.ToUpper(clarionType) {
trimmedType := strings.TrimSpace(clarionType)
// Preserve known PostgreSQL types (including arrays and extension types)
// from DCTX input instead of coercing them to generic text.
if pgsql.IsKnownPostgresType(trimmedType) {
pgType := canonicalizePostgresType(trimmedType)
if !pgsql.HasExplicitTypeModifier(pgType) && size > 0 && pgsql.SupportsLength(pgType) {
return pgType, size
}
return pgType, 0
}
switch strings.ToUpper(trimmedType) {
case "LONG":
if size == 8 {
return "bigint", 0
@@ -306,6 +319,32 @@ func (r *Reader) mapDataType(clarionType string, size int) (sqlType string, prec
}
}
// canonicalizePostgresType lowercases and whitespace-normalizes a PostgreSQL
// type, resolves the base name via pgsql.CanonicalizeBaseType, and reattaches
// any "(...)" modifier plus array suffixes.
func canonicalizePostgresType(typeStr string) string {
	normalized := strings.ToLower(strings.Join(strings.Fields(strings.TrimSpace(typeStr)), " "))
	if normalized == "" {
		return ""
	}
	// Peel off trailing array suffixes, counting depth for reattachment.
	arrayCount := 0
	for strings.HasSuffix(normalized, "[]") {
		arrayCount++
		normalized = strings.TrimSpace(strings.TrimSuffix(normalized, "[]"))
	}
	// Detach a well-formed "(...)" modifier, if present.
	modifier := ""
	if open := strings.Index(normalized, "("); open > 0 {
		if end := strings.LastIndex(normalized, ")"); end > open {
			modifier = normalized[open : end+1]
			normalized = strings.TrimSpace(normalized[:open])
		}
	}
	base := pgsql.CanonicalizeBaseType(normalized)
	return base + modifier + strings.Repeat("[]", arrayCount)
}
// processKeys processes DCTX keys and converts them to indexes and primary keys
func (r *Reader) processKeys(dctxTable *models.DCTXTable, table *models.Table, fieldGuidMap map[string]string) error {
for _, dctxKey := range dctxTable.Keys {

View File

@@ -493,3 +493,55 @@ func TestRelationships(t *testing.T) {
}
}
}
// mapDataType must pass known PostgreSQL types through (canonicalized) rather
// than coercing them, and only derive a length for length-capable types.
func TestMapDataType_PostgresTypes(t *testing.T) {
	reader := &Reader{}
	cases := []struct {
		name       string
		inputType  string
		size       int
		wantType   string
		wantLength int
	}{
		{name: "integer array preserved", inputType: "integer[]", wantType: "integer[]"},
		{name: "citext array preserved", inputType: "citext[]", wantType: "citext[]"},
		{name: "vector modifier preserved", inputType: "vector(1536)", wantType: "vector(1536)"},
		{name: "alias canonicalized in array", inputType: "int4[]", wantType: "integer[]"},
		{name: "varchar length from size", inputType: "varchar", size: 120, wantType: "varchar", wantLength: 120},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			gotType, gotLength := reader.mapDataType(tc.inputType, tc.size)
			if gotType != tc.wantType {
				t.Fatalf("mapDataType(%q, %d) type = %q, want %q", tc.inputType, tc.size, gotType, tc.wantType)
			}
			if gotLength != tc.wantLength {
				t.Fatalf("mapDataType(%q, %d) length = %d, want %d", tc.inputType, tc.size, gotLength, tc.wantLength)
			}
		})
	}
}

View File

@@ -8,6 +8,7 @@ import (
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/writers/drawdb"
)
@@ -231,17 +232,19 @@ func (r *Reader) convertToColumn(field *drawdb.DrawDBField, tableName, schemaNam
// Parse type and dimensions
typeStr := field.Type
typeStr = strings.TrimSpace(typeStr)
column.Type = typeStr
// Try to extract length/precision from type string like "varchar(255)" or "decimal(10,2)"
if strings.Contains(typeStr, "(") {
parts := strings.Split(typeStr, "(")
column.Type = parts[0]
baseType := strings.TrimSpace(parts[0])
if len(parts) > 1 {
dimensions := strings.TrimSuffix(parts[1], ")")
if strings.Contains(dimensions, ",") {
// Precision and scale (e.g., decimal(10,2))
// Precision and scale (e.g., decimal(10,2), numeric(10,2))
if pgsql.SupportsPrecision(baseType) {
dims := strings.Split(dimensions, ",")
if precision, err := strconv.Atoi(strings.TrimSpace(dims[0])); err == nil {
column.Precision = precision
@@ -251,14 +254,17 @@ func (r *Reader) convertToColumn(field *drawdb.DrawDBField, tableName, schemaNam
column.Scale = scale
}
}
}
} else {
// Just length (e.g., varchar(255))
if pgsql.SupportsLength(baseType) {
if length, err := strconv.Atoi(dimensions); err == nil {
column.Length = length
}
}
}
}
}
column.IsPrimaryKey = field.Primary
column.NotNull = field.NotNull || field.Primary

View File

@@ -6,6 +6,7 @@ import (
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/writers/drawdb"
)
func TestReader_ReadDatabase_Simple(t *testing.T) {
@@ -288,6 +289,61 @@ func TestReader_ReadDatabase_Complex(t *testing.T) {
}
}
// convertToColumn must keep the full type string while extracting length or
// precision/scale only for types that support those modifiers.
func TestConvertToColumn_PreservesExplicitTypeModifiers(t *testing.T) {
	reader := &Reader{}
	cases := []struct {
		name          string
		fieldType     string
		wantType      string
		wantLength    int
		wantPrecision int
		wantScale     int
	}{
		{name: "varchar with length", fieldType: "varchar(255)", wantType: "varchar(255)", wantLength: 255},
		{name: "numeric precision/scale", fieldType: "numeric(10,2)", wantType: "numeric(10,2)", wantPrecision: 10, wantScale: 2},
		{name: "custom vector modifier", fieldType: "vector(1536)", wantType: "vector(1536)", wantLength: 0},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			field := &drawdb.DrawDBField{
				Name: tc.name,
				Type: tc.fieldType,
			}
			column := reader.convertToColumn(field, "events", "public")
			if column.Type != tc.wantType {
				t.Fatalf("column type = %q, want %q", column.Type, tc.wantType)
			}
			if column.Length != tc.wantLength {
				t.Fatalf("column length = %d, want %d", column.Length, tc.wantLength)
			}
			if column.Precision != tc.wantPrecision {
				t.Fatalf("column precision = %d, want %d", column.Precision, tc.wantPrecision)
			}
			if column.Scale != tc.wantScale {
				t.Fatalf("column scale = %d, want %d", column.Scale, tc.wantScale)
			}
		})
	}
}
func TestReader_ReadSchema(t *testing.T) {
opts := &readers.ReaderOptions{
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "drawdb", "simple.json"),

View File

@@ -12,6 +12,7 @@ import (
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
)
@@ -676,19 +677,8 @@ func (r *Reader) extractTableFromGormTag(tag string) (tablename string, schemaNa
// deriveTableName derives a table name from struct name
func (r *Reader) deriveTableName(structName string) string {
// Remove "Model" prefix if present
name := strings.TrimPrefix(structName, "Model")
// Convert PascalCase to snake_case
var result strings.Builder
for i, r := range name {
if i > 0 && r >= 'A' && r <= 'Z' {
result.WriteRune('_')
}
result.WriteRune(r)
}
return strings.ToLower(result.String())
// Remove "Model" prefix if present, use the name as-is without transformation
return strings.TrimPrefix(structName, "Model")
}
// parseColumn parses a struct field into a Column model
@@ -784,11 +774,14 @@ func (r *Reader) extractGormTag(tag string) string {
// parseTypeWithLength parses a type string and extracts length if present
// e.g., "varchar(255)" returns ("varchar", 255)
func (r *Reader) parseTypeWithLength(typeStr string) (baseType string, length int) {
typeStr = strings.TrimSpace(typeStr)
baseType = typeStr
// Check for type with length: varchar(255), char(10), etc.
// Also handle precision/scale: numeric(10,2)
if strings.Contains(typeStr, "(") {
idx := strings.Index(typeStr, "(")
baseType = strings.TrimSpace(typeStr[:idx])
rawBaseType := strings.TrimSpace(typeStr[:idx])
// Extract numbers from parentheses
parens := typeStr[idx+1:]
@@ -796,14 +789,16 @@ func (r *Reader) parseTypeWithLength(typeStr string) (baseType string, length in
parens = parens[:endIdx]
}
// For now, just handle single number (length)
if !strings.Contains(parens, ",") {
// Only treat as "length" for text-ish SQL types.
// This avoids converting custom modifiers like vector(1536) into Length.
if pgsql.SupportsLength(rawBaseType) && !strings.Contains(parens, ",") {
if _, err := fmt.Sscanf(parens, "%d", &length); err == nil {
baseType = pgsql.CanonicalizeBaseType(rawBaseType)
return
}
}
}
baseType = typeStr
return
}

View File

@@ -71,8 +71,11 @@ func TestReader_ReadDatabase_Simple(t *testing.T) {
if !emailCol.NotNull {
t.Error("Column 'email' should be NOT NULL (explicit 'not null' tag)")
}
if emailCol.Type != "varchar" || emailCol.Length != 255 {
t.Errorf("Expected email type 'varchar(255)', got '%s' with length %d", emailCol.Type, emailCol.Length)
if emailCol.Type != "varchar" && emailCol.Type != "varchar(255)" {
t.Errorf("Expected email type 'varchar' or 'varchar(255)', got '%s' with length %d", emailCol.Type, emailCol.Length)
}
if emailCol.Length != 255 {
t.Errorf("Expected email length 255, got %d", emailCol.Length)
}
// Verify name column - primitive string type should be NOT NULL by default
@@ -363,6 +366,33 @@ func TestReader_ReadDatabase_Complex(t *testing.T) {
}
}
// Length-capable types split into (base, length); custom or precision-bearing
// modifiers (vector(1536), numeric(10,2)) must pass through untouched.
func TestParseTypeWithLength_PreservesExplicitTypeModifiers(t *testing.T) {
	reader := &Reader{}
	cases := []struct {
		input      string
		wantType   string
		wantLength int
	}{
		{"varchar(255)", "varchar", 255},
		{"character varying(120)", "character varying", 120},
		{"vector(1536)", "vector(1536)", 0},
		{"numeric(10,2)", "numeric(10,2)", 0},
	}
	for _, tc := range cases {
		t.Run(tc.input, func(t *testing.T) {
			gotType, gotLength := reader.parseTypeWithLength(tc.input)
			if gotType != tc.wantType {
				t.Fatalf("parseTypeWithLength(%q) type = %q, want %q", tc.input, gotType, tc.wantType)
			}
			if gotLength != tc.wantLength {
				t.Fatalf("parseTypeWithLength(%q) length = %d, want %d", tc.input, gotLength, tc.wantLength)
			}
		})
	}
}
func TestReader_ReadSchema(t *testing.T) {
opts := &readers.ReaderOptions{
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "gorm", "simple.go"),

View File

@@ -89,6 +89,10 @@ postgres://user@localhost/mydb?sslmode=disable
postgres://user:pass@db.example.com:5432/production?sslmode=require
```
By default, relspec sets `application_name` to `relspecgo/<version>` for PostgreSQL
sessions so they are identifiable in `pg_stat_activity`. If you provide
`application_name` in the connection string, your explicit value is preserved.
## Extracted Information
### Tables

View File

@@ -206,8 +206,19 @@ func (r *Reader) queryColumns(schemaName string) (map[string]map[string]*models.
c.numeric_precision,
c.numeric_scale,
c.udt_name,
pg_catalog.format_type(a.atttypid, a.atttypmod) as formatted_data_type,
col_description((c.table_schema||'.'||c.table_name)::regclass, c.ordinal_position) as description
FROM information_schema.columns c
JOIN pg_catalog.pg_namespace n
ON n.nspname = c.table_schema
JOIN pg_catalog.pg_class cls
ON cls.relname = c.table_name
AND cls.relnamespace = n.oid
JOIN pg_catalog.pg_attribute a
ON a.attrelid = cls.oid
AND a.attname = c.column_name
AND a.attnum > 0
AND NOT a.attisdropped
WHERE c.table_schema = $1
ORDER BY c.table_schema, c.table_name, c.ordinal_position
`
@@ -221,12 +232,12 @@ func (r *Reader) queryColumns(schemaName string) (map[string]map[string]*models.
columnsMap := make(map[string]map[string]*models.Column)
for rows.Next() {
var schema, tableName, columnName, isNullable, dataType, udtName string
var schema, tableName, columnName, isNullable, dataType, udtName, formattedDataType string
var ordinalPosition int
var columnDefault, description *string
var charMaxLength, numPrecision, numScale *int
if err := rows.Scan(&schema, &tableName, &columnName, &ordinalPosition, &columnDefault, &isNullable, &dataType, &charMaxLength, &numPrecision, &numScale, &udtName, &description); err != nil {
if err := rows.Scan(&schema, &tableName, &columnName, &ordinalPosition, &columnDefault, &isNullable, &dataType, &charMaxLength, &numPrecision, &numScale, &udtName, &formattedDataType, &description); err != nil {
return nil, err
}
@@ -241,12 +252,12 @@ func (r *Reader) queryColumns(schemaName string) (map[string]map[string]*models.
column.AutoIncrement = true
column.Default = defaultVal
} else {
column.Default = defaultVal
column.Default = normalizePostgresDefault(defaultVal)
}
}
// Map data type, preserving serial types when detected
column.Type = r.mapDataType(dataType, udtName, hasNextval)
column.Type = r.mapDataType(dataType, udtName, formattedDataType, hasNextval)
column.NotNull = (isNullable == "NO")
column.Sequence = uint(ordinalPosition)
@@ -602,3 +613,30 @@ func (r *Reader) parseIndexDefinition(indexName, tableName, schema, indexDef str
return index, nil
}
// normalizePostgresDefault converts a raw PostgreSQL column_default expression into the
// unquoted string value that the model convention expects. PostgreSQL stores string
// literal defaults as 'value' or 'value'::type (e.g. '{}'::text[]), while every other
// reader stores the bare value so the writer can re-quote it correctly.
func normalizePostgresDefault(defaultVal string) string {
	// Non-quoted defaults (now(), numbers, nextval(...)) pass through untouched.
	if !strings.HasPrefix(defaultVal, "'") {
		return defaultVal
	}
	// Decode the SQL string literal: skip the leading quote, unescape '' → ',
	// and stop at the first unescaped closing quote (any trailing ::cast is ignored).
	body := defaultVal[1:]
	var out strings.Builder
	i := 0
	for i < len(body) {
		ch := body[i]
		if ch != '\'' {
			out.WriteByte(ch)
			i++
			continue
		}
		// A doubled quote is an escaped literal quote; a lone quote terminates.
		if i+1 < len(body) && body[i+1] == '\'' {
			out.WriteByte('\'')
			i += 2
			continue
		}
		break
	}
	return out.String()
}

View File

@@ -244,7 +244,7 @@ func (r *Reader) ReadTable() (*models.Table, error) {
// connect establishes a connection to the PostgreSQL database
func (r *Reader) connect() error {
conn, err := pgx.Connect(r.ctx, r.options.ConnectionString)
conn, err := pgsql.Connect(r.ctx, r.options.ConnectionString, "reader-pgsql")
if err != nil {
return err
}
@@ -259,12 +259,14 @@ func (r *Reader) close() {
}
}
// mapDataType maps PostgreSQL data types to canonical types
func (r *Reader) mapDataType(pgType, udtName string, hasNextval bool) string {
// mapDataType maps PostgreSQL data types while preserving exact type text when available.
func (r *Reader) mapDataType(pgType, udtName, formattedType string, hasNextval bool) string {
normalizedPGType := strings.ToLower(strings.TrimSpace(pgType))
// If the column has a nextval default, it's likely a serial type
// Map to the appropriate serial type instead of the base integer type
if hasNextval {
switch strings.ToLower(pgType) {
switch normalizedPGType {
case "integer", "int", "int4":
return "serial"
case "bigint", "int8":
@@ -274,6 +276,17 @@ func (r *Reader) mapDataType(pgType, udtName string, hasNextval bool) string {
}
}
// Prefer the database-provided formatted type; this preserves arrays/custom
// types/modifiers like text[], vector(1536), numeric(10,2), etc.
if strings.TrimSpace(formattedType) != "" {
return formattedType
}
// information_schema reports arrays generically as "ARRAY" with udt_name like "_text".
if strings.EqualFold(pgType, "ARRAY") && strings.HasPrefix(udtName, "_") && len(udtName) > 1 {
return udtName[1:] + "[]"
}
// Map common PostgreSQL types
typeMap := map[string]string{
"integer": "integer",
@@ -320,7 +333,7 @@ func (r *Reader) mapDataType(pgType, udtName string, hasNextval bool) string {
}
// Try mapped type first
if mapped, exists := typeMap[pgType]; exists {
if mapped, exists := typeMap[normalizedPGType]; exists {
return mapped
}
@@ -329,8 +342,11 @@ func (r *Reader) mapDataType(pgType, udtName string, hasNextval bool) string {
return pgsql.GetSQLType(pgType)
}
// Return UDT name for custom types
// Return UDT name for custom types (including array fallback when needed)
if udtName != "" {
if strings.HasPrefix(udtName, "_") && len(udtName) > 1 {
return udtName[1:] + "[]"
}
return udtName
}

View File

@@ -175,33 +175,37 @@ func TestMapDataType(t *testing.T) {
tests := []struct {
pgType string
udtName string
formattedType string
expected string
}{
{"integer", "int4", "integer"},
{"bigint", "int8", "bigint"},
{"smallint", "int2", "smallint"},
{"character varying", "varchar", "varchar"},
{"text", "text", "text"},
{"boolean", "bool", "boolean"},
{"timestamp without time zone", "timestamp", "timestamp"},
{"timestamp with time zone", "timestamptz", "timestamptz"},
{"json", "json", "json"},
{"jsonb", "jsonb", "jsonb"},
{"uuid", "uuid", "uuid"},
{"numeric", "numeric", "numeric"},
{"real", "float4", "real"},
{"double precision", "float8", "double precision"},
{"date", "date", "date"},
{"time without time zone", "time", "time"},
{"bytea", "bytea", "bytea"},
{"unknown_type", "custom", "custom"}, // Should return UDT name
{"integer", "int4", "", "integer"},
{"bigint", "int8", "", "bigint"},
{"smallint", "int2", "", "smallint"},
{"character varying", "varchar", "", "varchar"},
{"text", "text", "", "text"},
{"boolean", "bool", "", "boolean"},
{"timestamp without time zone", "timestamp", "", "timestamp"},
{"timestamp with time zone", "timestamptz", "", "timestamptz"},
{"json", "json", "", "json"},
{"jsonb", "jsonb", "", "jsonb"},
{"uuid", "uuid", "", "uuid"},
{"numeric", "numeric", "", "numeric"},
{"real", "float4", "", "real"},
{"double precision", "float8", "", "double precision"},
{"date", "date", "", "date"},
{"time without time zone", "time", "", "time"},
{"bytea", "bytea", "", "bytea"},
{"unknown_type", "custom", "", "custom"}, // Should return UDT name
{"ARRAY", "_text", "", "text[]"},
{"USER-DEFINED", "vector", "vector(1536)", "vector(1536)"},
{"character varying", "varchar", "character varying(255)", "character varying(255)"},
}
for _, tt := range tests {
t.Run(tt.pgType, func(t *testing.T) {
result := reader.mapDataType(tt.pgType, tt.udtName, false)
result := reader.mapDataType(tt.pgType, tt.udtName, tt.formattedType, false)
if result != tt.expected {
t.Errorf("mapDataType(%s, %s) = %s, expected %s", tt.pgType, tt.udtName, result, tt.expected)
t.Errorf("mapDataType(%s, %s, %s) = %s, expected %s", tt.pgType, tt.udtName, tt.formattedType, result, tt.expected)
}
})
}
@@ -218,9 +222,9 @@ func TestMapDataType(t *testing.T) {
for _, tt := range serialTests {
t.Run(tt.pgType+"_with_nextval", func(t *testing.T) {
result := reader.mapDataType(tt.pgType, "", true)
result := reader.mapDataType(tt.pgType, "", "", true)
if result != tt.expected {
t.Errorf("mapDataType(%s, '', true) = %s, expected %s", tt.pgType, result, tt.expected)
t.Errorf("mapDataType(%s, '', '', true) = %s, expected %s", tt.pgType, result, tt.expected)
}
})
}

View File

@@ -70,6 +70,7 @@ func (r *Reader) ReadTable() (*models.Table, error) {
// parsePrisma parses Prisma schema content and returns a Database model
func (r *Reader) parsePrisma(content string) (*models.Database, error) {
db := models.InitDatabase("database")
db.SourceFormat = "prisma"
if r.options.Metadata != nil {
if name, ok := r.options.Metadata["name"].(string); ok {
@@ -139,7 +140,7 @@ func (r *Reader) parsePrisma(content string) (*models.Database, error) {
case "datasource":
r.parseDatasource(blockContent, db)
case "generator":
// We don't need to do anything with generator blocks
r.parseGenerator(blockContent, db)
case "model":
if currentTable != nil {
r.parseModelFields(blockContent, currentTable)
@@ -173,10 +174,34 @@ func (r *Reader) parsePrisma(content string) (*models.Database, error) {
// Second pass: resolve relationships
r.resolveRelationships(schema)
if db.SourceFormat == "prisma" && r.options != nil && r.options.Prisma7 {
db.SourceFormat = "prisma7"
}
db.Schemas = append(db.Schemas, schema)
return db, nil
}
// parseGenerator derives the database source format from a generator block.
// A provider of "prisma-client" marks the schema as Prisma 7; any other
// provider marks it as classic Prisma. When no provider line is present,
// the explicit Prisma7 reader option decides.
func (r *Reader) parseGenerator(lines []string, db *models.Database) {
	providerRegex := regexp.MustCompile(`provider\s*=\s*"([^"]+)"`)
	for _, line := range lines {
		m := providerRegex.FindStringSubmatch(line)
		if m == nil {
			continue
		}
		if m[1] == "prisma-client" {
			db.SourceFormat = "prisma7"
		} else {
			db.SourceFormat = "prisma"
		}
		return
	}
	// No provider found in this block; fall back to the CLI/reader flag.
	if r.options != nil && r.options.Prisma7 {
		db.SourceFormat = "prisma7"
	}
}
// parseDatasource extracts database type from datasource block
func (r *Reader) parseDatasource(lines []string, db *models.Database) {
providerRegex := regexp.MustCompile(`provider\s*=\s*"?(\w+)"?`)

View File

@@ -0,0 +1,77 @@
package prisma
import (
"os"
"path/filepath"
"testing"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
)
// TestReadDatabase_Prisma7GeneratorSetsSourceFormat verifies that a generator
// block whose provider is "prisma-client" flags the parsed database as prisma7.
func TestReadDatabase_Prisma7GeneratorSetsSourceFormat(t *testing.T) {
	t.Parallel()
	schemaPath := filepath.Join(t.TempDir(), "schema.prisma")
	content := `datasource db {
provider = "postgresql"
url = env("DATABASE_URL")
}
generator client {
provider = "prisma-client"
output = "./generated"
}
model User {
id Int @id @default(autoincrement())
}`
	if err := os.WriteFile(schemaPath, []byte(content), 0644); err != nil {
		t.Fatalf("failed to write schema: %v", err)
	}
	db, err := NewReader(&readers.ReaderOptions{FilePath: schemaPath}).ReadDatabase()
	if err != nil {
		t.Fatalf("ReadDatabase() failed: %v", err)
	}
	if got := db.SourceFormat; got != "prisma7" {
		t.Fatalf("expected SourceFormat prisma7, got %q", got)
	}
}
func TestReadDatabase_Prisma7FlagSetsSourceFormatWithoutGenerator(t *testing.T) {
t.Parallel()
tmpDir := t.TempDir()
schemaPath := filepath.Join(tmpDir, "schema.prisma")
content := `datasource db {
provider = "postgresql"
url = env("DATABASE_URL")
}
model User {
id Int @id @default(autoincrement())
}`
if err := os.WriteFile(schemaPath, []byte(content), 0644); err != nil {
t.Fatalf("failed to write schema: %v", err)
}
reader := NewReader(&readers.ReaderOptions{
FilePath: schemaPath,
Prisma7: true,
})
db, err := reader.ReadDatabase()
if err != nil {
t.Fatalf("ReadDatabase() failed: %v", err)
}
if db.SourceFormat != "prisma7" {
t.Fatalf("expected SourceFormat prisma7 from flag, got %q", db.SourceFormat)
}
}

View File

@@ -25,6 +25,9 @@ type ReaderOptions struct {
// ConnectionString is the database connection string (for DB readers)
ConnectionString string
// Prisma7 enables Prisma 7-specific handling for Prisma schemas.
Prisma7 bool
// Additional options can be added here as needed
Metadata map[string]interface{}
}

View File

@@ -5,9 +5,11 @@ import (
"fmt"
"os"
"regexp"
"strconv"
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
)
@@ -549,6 +551,41 @@ func (r *Reader) parseColumnOptions(decorator string, column *models.Column, tab
}
}
// Preserve explicit type modifiers from options where present.
// Example: @Column({ type: 'varchar', length: 255 }) -> varchar(255)
if column.Type != "" && !strings.Contains(column.Type, "(") {
lengthRegex := regexp.MustCompile(`length:\s*(\d+)`)
precisionRegex := regexp.MustCompile(`precision:\s*(\d+)`)
scaleRegex := regexp.MustCompile(`scale:\s*(\d+)`)
baseType := strings.ToLower(strings.TrimSpace(column.Type))
if pgsql.SupportsLength(baseType) {
if matches := lengthRegex.FindStringSubmatch(content); len(matches) == 2 {
if n, err := strconv.Atoi(matches[1]); err == nil && n > 0 {
column.Length = n
column.Type = fmt.Sprintf("%s(%d)", column.Type, n)
}
}
}
if pgsql.SupportsPrecision(baseType) {
if matches := precisionRegex.FindStringSubmatch(content); len(matches) == 2 {
if p, err := strconv.Atoi(matches[1]); err == nil && p > 0 {
column.Precision = p
if sm := scaleRegex.FindStringSubmatch(content); len(sm) == 2 {
if s, err := strconv.Atoi(sm[1]); err == nil && s >= 0 {
column.Scale = s
column.Type = fmt.Sprintf("%s(%d,%d)", column.Type, p, s)
}
} else {
column.Type = fmt.Sprintf("%s(%d)", column.Type, p)
}
}
}
}
}
if strings.Contains(content, "nullable: true") || strings.Contains(content, "nullable:true") {
column.NotNull = false
}

View File

@@ -0,0 +1,60 @@
package typeorm
import (
"testing"
"git.warky.dev/wdevs/relspecgo/pkg/models"
)
// TestParseColumnOptions_PreservesTypeModifiers verifies that explicit
// length/precision/scale options in a @Column decorator are folded into the
// column type, while types that already carry a modifier are left untouched.
func TestParseColumnOptions_PreservesTypeModifiers(t *testing.T) {
	reader := &Reader{}
	table := models.InitTable("users", "public")
	cases := []struct {
		name          string
		decorator     string
		wantType      string
		wantLength    int
		wantPrecision int
		wantScale     int
	}{
		{
			name:       "varchar with length",
			decorator:  `@Column({ type: 'varchar', length: 255 })`,
			wantType:   "varchar(255)",
			wantLength: 255,
		},
		{
			name:          "numeric with precision and scale",
			decorator:     `@Column({ type: 'numeric', precision: 10, scale: 2 })`,
			wantType:      "numeric(10,2)",
			wantPrecision: 10,
			wantScale:     2,
		},
		{
			name:      "custom type with explicit modifier is preserved",
			decorator: `@Column({ type: 'vector(1536)' })`,
			wantType:  "vector(1536)",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			col := models.InitColumn("sample", table.Name, table.Schema)
			reader.parseColumnOptions(tc.decorator, col, table)
			if col.Type != tc.wantType {
				t.Fatalf("column type = %q, want %q", col.Type, tc.wantType)
			}
			if col.Length != tc.wantLength {
				t.Fatalf("column length = %d, want %d", col.Length, tc.wantLength)
			}
			if col.Precision != tc.wantPrecision {
				t.Fatalf("column precision = %d, want %d", col.Precision, tc.wantPrecision)
			}
			if col.Scale != tc.wantScale {
				t.Fatalf("column scale = %d, want %d", col.Scale, tc.wantScale)
			}
		})
	}
}

View File

@@ -46,54 +46,67 @@ func main() {
### CLI Examples
```bash
# Generate Bun models from PostgreSQL database
relspec --input pgsql \
--conn "postgres://localhost/mydb" \
--output bun \
--out-file models.go \
--package models
# Generate Bun models from a DBML schema (default: resolvespec types)
relspec convert --from dbml --from-path schema.dbml \
--to bun --to-path models.go --package models
# Convert GORM models to Bun
relspec --input gorm --in-file gorm_models.go --output bun --out-file bun_models.go
# Use standard library database/sql nullable types instead of resolvespec
relspec convert --from dbml --from-path schema.dbml \
--to bun --to-path models.go --package models \
--types stdlib
# Multi-file output
relspec --input json --in-file schema.json --output bun --out-file models/
# Explicitly select resolvespec types (same as omitting --types)
relspec convert --from pgsql --from-conn "postgres://localhost/mydb" \
--to bun --to-path models.go --package models \
--types resolvespec
# Multi-file output (one file per table)
relspec convert --from json --from-path schema.json \
--to bun --to-path models/ --package models
```
## Generated Code Example
## Generated Code Examples
### Default — resolvespec types (`--types resolvespec`)
```go
package models
import (
"time"
"database/sql"
resolvespec_common "github.com/bitechdev/ResolveSpec/pkg/spectypes"
"github.com/uptrace/bun"
)
type User struct {
bun.BaseModel `bun:"table:users,alias:u"`
ID int64 `bun:"id,pk,autoincrement" json:"id"`
Username string `bun:"username,notnull,unique" json:"username"`
Email string `bun:"email,notnull" json:"email"`
Bio sql.NullString `bun:"bio" json:"bio,omitempty"`
CreatedAt time.Time `bun:"created_at,notnull,default:now()" json:"created_at"`
// Relationships
Posts []*Post `bun:"rel:has-many,join:id=user_id" json:"posts,omitempty"`
ID int64 `bun:"id,type:uuid,pk," json:"id"`
Username string `bun:"username,type:text,notnull," json:"username"`
Email resolvespec_common.SqlString `bun:"email,type:text,nullzero," json:"email"`
Tags resolvespec_common.SqlStringArray `bun:"tags,type:text[],default:'{}',notnull," json:"tags"`
CreatedAt resolvespec_common.SqlTimeStamp `bun:"created_at,type:timestamptz,default:now(),notnull," json:"created_at"`
}
```
type Post struct {
bun.BaseModel `bun:"table:posts,alias:p"`
### Standard library — `--types stdlib`
ID int64 `bun:"id,pk" json:"id"`
UserID int64 `bun:"user_id,notnull" json:"user_id"`
Title string `bun:"title,notnull" json:"title"`
Content sql.NullString `bun:"content" json:"content,omitempty"`
```go
package models
// Belongs to
User *User `bun:"rel:belongs-to,join:user_id=id" json:"user,omitempty"`
import (
"database/sql"
"time"
"github.com/uptrace/bun"
)
type User struct {
bun.BaseModel `bun:"table:users,alias:u"`
ID string `bun:"id,type:uuid,pk," json:"id"`
Username string `bun:"username,type:text,notnull," json:"username"`
Email sql.NullString `bun:"email,type:text,nullzero," json:"email"`
Tags []string `bun:"tags,type:text[],default:'{}',notnull," json:"tags"`
CreatedAt time.Time `bun:"created_at,type:timestamptz,default:now(),notnull," json:"created_at"`
}
```
@@ -111,19 +124,68 @@ type Post struct {
## Type Mapping
| SQL Type | Go Type | Nullable Type |
|----------|---------|---------------|
| bigint | int64 | sql.NullInt64 |
| integer | int | sql.NullInt32 |
| varchar, text | string | sql.NullString |
| boolean | bool | sql.NullBool |
| timestamp | time.Time | sql.NullTime |
| numeric | float64 | sql.NullFloat64 |
The nullable type package is selected with `--types` (or `WriterOptions.NullableTypes`).
| SQL Type | NOT NULL (both) | Nullable — resolvespec | Nullable — stdlib |
|---|---|---|---|
| `bigint` | `int64` | `SqlInt64` | `sql.NullInt64` |
| `integer` | `int32` | `SqlInt32` | `sql.NullInt32` |
| `smallint` | `int16` | `SqlInt16` | `sql.NullInt16` |
| `text`, `varchar` | `string` | `SqlString` | `sql.NullString` |
| `boolean` | `bool` | `SqlBool` | `sql.NullBool` |
| `timestamp`, `timestamptz` | `time.Time`* | `SqlTimeStamp` | `sql.NullTime` |
| `numeric`, `decimal` | `float64` | `SqlFloat64` | `sql.NullFloat64` |
| `uuid` | `string` | `SqlUUID` | `sql.NullString` |
| `jsonb` | `string` | `SqlJSONB` | `sql.NullString` |
| `text[]` | `SqlStringArray` | `SqlStringArray` | `[]string` |
| `integer[]` | `SqlInt32Array` | `SqlInt32Array` | `[]int32` |
| `uuid[]` | `SqlUUIDArray` | `SqlUUIDArray` | `[]string` |
| `vector` | `SqlVector` | `SqlVector` | `[]float32` |
\* In resolvespec mode, NOT NULL timestamp columns map to `SqlTimeStamp` rather than `time.Time`; plain Go types are used for NOT NULL columns only when the base type is simple (e.g. an integer or boolean). In stdlib mode, NOT NULL timestamps use `time.Time`.
## Writer Options
### NullableTypes
Controls which Go package is used for nullable column types. Set via the `--types` CLI flag or `WriterOptions.NullableTypes`:
```go
// Use resolvespec types (default — omit NullableTypes or set to "resolvespec")
options := &writers.WriterOptions{
OutputPath: "models.go",
PackageName: "models",
NullableTypes: writers.NullableTypeResolveSpec,
}
// Use standard library database/sql types
options := &writers.WriterOptions{
OutputPath: "models.go",
PackageName: "models",
NullableTypes: writers.NullableTypeStdlib,
}
```
### Metadata Options
```go
options := &writers.WriterOptions{
OutputPath: "models.go",
PackageName: "models",
Metadata: map[string]any{
"multi_file": true, // Enable multi-file mode
"populate_refs": true, // Populate RefDatabase/RefSchema
"generate_get_id_str": true, // Generate GetIDStr() methods
},
}
```
## Notes
- Model names are derived from table names (singularized, PascalCase)
- Table aliases are auto-generated from table names
- Nullable columns use `resolvespec_common.SqlString`, `resolvespec_common.SqlTimeStamp`, etc. by default; pass `--types stdlib` to use `sql.NullString`, `sql.NullTime`, etc. instead
- Array columns use `resolvespec_common.SqlStringArray`, `resolvespec_common.SqlInt32Array`, etc. by default; `--types stdlib` produces plain Go slices (`[]string`, `[]int32`, …)
- Multi-file mode: one file per table named `sql_{schema}_{table}.go`
- Generated code is auto-formatted
- JSON tags are automatically added

View File

@@ -26,7 +26,10 @@ type ModelData struct {
Fields []*FieldData
Config *MethodConfig
PrimaryKeyField string // Name of the primary key field
PrimaryKeyType string // Go type of the primary key field
PrimaryKeyIsSQL bool // Whether PK uses SQL type (needs .Int64() call)
PrimaryKeyIsStr bool // Whether helper methods should use string IDs
PrimaryKeyIDType string // Helper method GetID/SetID/UpdateID type
IDColumnName string // Name of the ID column in database
Prefix string // 3-letter prefix
}
@@ -140,7 +143,13 @@ func NewModelData(table *models.Table, schema string, typeMapper *TypeMapper, fl
model.IDColumnName = safeName
// Check if PK type is a SQL type (contains resolvespec_common or sql_types)
goType := typeMapper.SQLTypeToGoType(col.Type, col.NotNull)
model.PrimaryKeyType = goType
model.PrimaryKeyIsSQL = strings.Contains(goType, "resolvespec_common") || strings.Contains(goType, "sql_types")
model.PrimaryKeyIsStr = isStringLikePrimaryKeyType(goType)
model.PrimaryKeyIDType = "int64"
if model.PrimaryKeyIsStr {
model.PrimaryKeyIDType = "string"
}
break
}
}
@@ -192,6 +201,15 @@ func formatComment(description, comment string) string {
return comment
}
// isStringLikePrimaryKeyType reports whether the mapped Go type of a primary
// key is string-valued, in which case the generated GetID/SetID/UpdateID
// helpers must use string IDs instead of int64.
func isStringLikePrimaryKeyType(goType string) bool {
	return goType == "string" ||
		goType == "sql.NullString" ||
		goType == "resolvespec_common.SqlString" ||
		goType == "resolvespec_common.SqlUUID"
}
// resolveFieldNameCollision checks if a field name conflicts with generated method names
// and adds an underscore suffix if there's a collision
func resolveFieldNameCollision(fieldName string) string {

View File

@@ -44,33 +44,55 @@ func (m {{.Name}}) SchemaName() string {
{{end}}
{{if and .Config.GenerateGetID .PrimaryKeyField}}
// GetID returns the primary key value
func (m {{.Name}}) GetID() int64 {
func (m {{.Name}}) GetID() {{.PrimaryKeyIDType}} {
{{if .PrimaryKeyIsSQL -}}
{{if .PrimaryKeyIsStr -}}
return m.{{.PrimaryKeyField}}.String()
{{- else -}}
return m.{{.PrimaryKeyField}}.Int64()
{{- end}}
{{- else -}}
{{if .PrimaryKeyIsStr -}}
return m.{{.PrimaryKeyField}}
{{- else -}}
return int64(m.{{.PrimaryKeyField}})
{{- end}}
{{- end}}
}
{{end}}
{{if and .Config.GenerateGetIDStr .PrimaryKeyField}}
// GetIDStr returns the primary key as a string
func (m {{.Name}}) GetIDStr() string {
{{if .PrimaryKeyIsSQL -}}
return m.{{.PrimaryKeyField}}.String()
{{- else if .PrimaryKeyIsStr -}}
return m.{{.PrimaryKeyField}}
{{- else -}}
return fmt.Sprintf("%d", m.{{.PrimaryKeyField}})
{{- end}}
}
{{end}}
{{if and .Config.GenerateSetID .PrimaryKeyField}}
// SetID sets the primary key value
func (m {{.Name}}) SetID(newid int64) {
func (m {{.Name}}) SetID(newid {{.PrimaryKeyIDType}}) {
m.UpdateID(newid)
}
{{end}}
{{if and .Config.GenerateUpdateID .PrimaryKeyField}}
// UpdateID updates the primary key value
func (m *{{.Name}}) UpdateID(newid int64) {
func (m *{{.Name}}) UpdateID(newid {{.PrimaryKeyIDType}}) {
{{if .PrimaryKeyIsSQL -}}
m.{{.PrimaryKeyField}}.FromString(fmt.Sprintf("%d", newid))
{{if .PrimaryKeyIsStr -}}
m.{{.PrimaryKeyField}}.FromString(newid)
{{- else -}}
m.{{.PrimaryKeyField}} = int32(newid)
m.{{.PrimaryKeyField}}.FromString(fmt.Sprintf("%d", newid))
{{- end}}
{{- else -}}
{{if .PrimaryKeyIsStr -}}
m.{{.PrimaryKeyField}} = newid
{{- else -}}
m.{{.PrimaryKeyField}} = {{.PrimaryKeyType}}(newid)
{{- end}}
{{- end}}
}
{{end}}

View File

@@ -5,48 +5,55 @@ import (
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
)
// TypeMapper handles type conversions between SQL and Go types for Bun
type TypeMapper struct {
// Package alias for sql_types import
sqlTypesAlias string
typeStyle string // writers.NullableTypeResolveSpec | writers.NullableTypeStdlib
}
// NewTypeMapper creates a new TypeMapper with default settings
func NewTypeMapper() *TypeMapper {
// NewTypeMapper creates a new TypeMapper.
// typeStyle should be writers.NullableTypeResolveSpec or writers.NullableTypeStdlib;
// an empty string defaults to resolvespec.
func NewTypeMapper(typeStyle string) *TypeMapper {
if typeStyle == "" {
typeStyle = writers.NullableTypeResolveSpec
}
return &TypeMapper{
sqlTypesAlias: "resolvespec_common",
typeStyle: typeStyle,
}
}
// SQLTypeToGoType converts a SQL type to its Go equivalent.
// notNull selects plain Go types where the active type style allows it;
// stale pre-refactor comments that duplicated the old behavior description
// have been removed.
func (tm *TypeMapper) SQLTypeToGoType(sqlType string, notNull bool) string {
	// Array types are handled separately for both styles.
	if pgsql.IsArrayType(sqlType) {
		return tm.arrayGoType(tm.extractBaseType(sqlType))
	}
	baseType := tm.extractBaseType(sqlType)
	if tm.typeStyle == writers.NullableTypeStdlib {
		if notNull {
			return tm.rawGoType(baseType)
		}
		return tm.stdlibNullableGoType(baseType)
	}
	// resolvespec (default): use base Go types only for simple NOT NULL fields.
	if notNull && tm.isSimpleType(baseType) {
		return tm.baseGoType(baseType)
	}
	return tm.bunGoType(baseType)
}
// extractBaseType extracts the canonical base type from a SQL type string
// (lowercased, with any length/precision modifier stripped) by delegating to
// the shared pgsql helpers.
//
// The previous hand-rolled lowering/paren-stripping body was left in place
// above this return after a refactor, making it unreachable dead code; it has
// been removed so only the delegation remains.
func (tm *TypeMapper) extractBaseType(sqlType string) string {
	return pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(sqlType))
}
// isSimpleType checks if a type should use base Go type when NOT NULL
@@ -160,6 +167,9 @@ func (tm *TypeMapper) bunGoType(sqlType string) string {
// Other
"money": tm.sqlTypesAlias + ".SqlFloat64",
// pgvector
"vector": tm.sqlTypesAlias + ".SqlVector",
}
if goType, ok := typeMap[sqlType]; ok {
@@ -170,6 +180,123 @@ func (tm *TypeMapper) bunGoType(sqlType string) string {
return tm.sqlTypesAlias + ".SqlString"
}
// arrayGoType returns the Go type for a PostgreSQL array column.
// The baseElemType is the canonical base type (e.g. "text", "integer").
// In stdlib mode this delegates to plain Go slices; otherwise the
// resolvespec Sql*Array wrappers are used, defaulting to a string array
// for unrecognized element types.
func (tm *TypeMapper) arrayGoType(baseElemType string) string {
	if tm.typeStyle == writers.NullableTypeStdlib {
		return tm.stdlibArrayGoType(baseElemType)
	}
	var suffix string
	switch baseElemType {
	case "text", "varchar", "char", "character", "citext", "bpchar",
		"inet", "cidr", "macaddr", "json", "jsonb":
		suffix = ".SqlStringArray"
	case "integer", "int", "int4", "serial":
		suffix = ".SqlInt32Array"
	case "smallint", "int2", "smallserial":
		suffix = ".SqlInt16Array"
	case "bigint", "int8", "bigserial":
		suffix = ".SqlInt64Array"
	case "real", "float4":
		suffix = ".SqlFloat32Array"
	case "double precision", "float8", "numeric", "decimal", "money":
		suffix = ".SqlFloat64Array"
	case "boolean", "bool":
		suffix = ".SqlBoolArray"
	case "uuid":
		suffix = ".SqlUUIDArray"
	default:
		// Fallback for unknown element types.
		suffix = ".SqlStringArray"
	}
	return tm.sqlTypesAlias + suffix
}
// rawGoType returns the plain Go type for a NOT NULL column in stdlib mode.
// Unknown base types fall back to string.
func (tm *TypeMapper) rawGoType(sqlType string) string {
	switch sqlType {
	case "integer", "int", "int4", "serial":
		return "int32"
	case "smallint", "int2", "smallserial":
		return "int16"
	case "bigint", "int8", "bigserial":
		return "int64"
	case "boolean", "bool":
		return "bool"
	case "real", "float4":
		return "float32"
	case "double precision", "float8", "numeric", "decimal", "money":
		return "float64"
	case "text", "varchar", "char", "character", "citext", "bpchar",
		"inet", "cidr", "macaddr", "uuid", "json", "jsonb":
		return "string"
	case "timestamp", "timestamp without time zone", "timestamp with time zone",
		"timestamptz", "date", "time", "time without time zone",
		"time with time zone", "timetz":
		return "time.Time"
	case "bytea":
		return "[]byte"
	case "vector":
		return "[]float32"
	default:
		return "string"
	}
}
// stdlibNullableGoType returns the database/sql nullable type for a column.
// bytea and vector have no sql.Null* wrapper and map to slices, which are
// already nilable; unknown base types fall back to sql.NullString.
func (tm *TypeMapper) stdlibNullableGoType(sqlType string) string {
	switch sqlType {
	case "integer", "int", "int4", "serial":
		return "sql.NullInt32"
	case "smallint", "int2", "smallserial":
		return "sql.NullInt16"
	case "bigint", "int8", "bigserial":
		return "sql.NullInt64"
	case "boolean", "bool":
		return "sql.NullBool"
	case "real", "float4", "double precision", "float8",
		"numeric", "decimal", "money":
		return "sql.NullFloat64"
	case "text", "varchar", "char", "character", "citext", "bpchar",
		"inet", "cidr", "macaddr", "uuid", "json", "jsonb":
		return "sql.NullString"
	case "timestamp", "timestamp without time zone", "timestamp with time zone",
		"timestamptz", "date", "time", "time without time zone",
		"time with time zone", "timetz":
		return "sql.NullTime"
	case "bytea":
		return "[]byte"
	case "vector":
		return "[]float32"
	default:
		return "sql.NullString"
	}
}
// stdlibArrayGoType returns a plain Go slice type for array columns in stdlib
// mode. Unknown element types fall back to []string.
func (tm *TypeMapper) stdlibArrayGoType(baseElemType string) string {
	switch baseElemType {
	case "text", "varchar", "char", "character", "citext", "bpchar",
		"inet", "cidr", "macaddr", "uuid", "json", "jsonb":
		return "[]string"
	case "integer", "int", "int4", "serial":
		return "[]int32"
	case "smallint", "int2", "smallserial":
		return "[]int16"
	case "bigint", "int8", "bigserial":
		return "[]int64"
	case "real", "float4":
		return "[]float32"
	case "double precision", "float8", "numeric", "decimal", "money":
		return "[]float64"
	case "boolean", "bool":
		return "[]bool"
	default:
		return "[]string"
	}
}
// BuildBunTag generates a complete Bun tag string for a column
// Bun format: bun:"column_name,type:type_name,pk,default:value"
func (tm *TypeMapper) BuildBunTag(column *models.Column, table *models.Table) string {
@@ -184,9 +311,11 @@ func (tm *TypeMapper) BuildBunTag(column *models.Column, table *models.Table) st
if column.Type != "" {
// Sanitize type to remove backticks
typeStr := writers.SanitizeStructTagValue(column.Type)
if column.Length > 0 {
isArray := pgsql.IsArrayType(typeStr)
hasExplicitTypeModifier := pgsql.HasExplicitTypeModifier(typeStr)
if !hasExplicitTypeModifier && !isArray && column.Length > 0 {
typeStr = fmt.Sprintf("%s(%d)", typeStr, column.Length)
} else if column.Precision > 0 {
} else if !hasExplicitTypeModifier && !isArray && column.Precision > 0 {
if column.Scale > 0 {
typeStr = fmt.Sprintf("%s(%d,%d)", typeStr, column.Precision, column.Scale)
} else {
@@ -194,6 +323,9 @@ func (tm *TypeMapper) BuildBunTag(column *models.Column, table *models.Table) st
}
}
parts = append(parts, fmt.Sprintf("type:%s", typeStr))
if isArray && tm.typeStyle == writers.NullableTypeStdlib {
parts = append(parts, "array")
}
}
// Primary key
@@ -291,11 +423,20 @@ func (tm *TypeMapper) NeedsFmtImport(generateGetIDStr bool) bool {
return generateGetIDStr
}
// GetSQLTypesImport returns the import path for the ResolveSpec spectypes
// package, the default provider of nullable SQL wrapper types.
func (tm *TypeMapper) GetSQLTypesImport() string {
	return "github.com/bitechdev/ResolveSpec/pkg/spectypes"
}
// GetNullableTypeImportLine returns the full Go import line for the nullable
// type package, ready to pass to AddImport: "database/sql" in stdlib mode,
// otherwise the aliased ResolveSpec spectypes import.
func (tm *TypeMapper) GetNullableTypeImportLine() string {
	if tm.typeStyle != writers.NullableTypeStdlib {
		// resolvespec (default): aliased import, e.g. `sql_types "…/spectypes"`.
		return fmt.Sprintf("%s %q", tm.sqlTypesAlias, tm.GetSQLTypesImport())
	}
	return "\"database/sql\""
}
// GetBunImport returns the import path for Bun
func (tm *TypeMapper) GetBunImport() string {
return "github.com/uptrace/bun"

View File

@@ -24,7 +24,7 @@ type Writer struct {
func NewWriter(options *writers.WriterOptions) *Writer {
w := &Writer{
options: options,
typeMapper: NewTypeMapper(),
typeMapper: NewTypeMapper(options.NullableTypes),
config: LoadMethodConfigFromMetadata(options.Metadata),
}
@@ -80,8 +80,8 @@ func (w *Writer) writeSingleFile(db *models.Database) error {
// Add bun import (always needed)
templateData.AddImport(fmt.Sprintf("\"%s\"", w.typeMapper.GetBunImport()))
// Add resolvespec_common import (always needed for nullable types)
templateData.AddImport(fmt.Sprintf("resolvespec_common \"%s\"", w.typeMapper.GetSQLTypesImport()))
// Add nullable types import (resolvespec or stdlib depending on options)
templateData.AddImport(w.typeMapper.GetNullableTypeImportLine())
// Collect all models
for _, schema := range db.Schemas {
@@ -102,8 +102,8 @@ func (w *Writer) writeSingleFile(db *models.Database) error {
}
}
// Add fmt import if GetIDStr is enabled
if w.config.GenerateGetIDStr {
// Add fmt import when generated helper methods need string formatting.
if w.needsFmtImport(templateData.Models) {
templateData.AddImport("\"fmt\"")
}
@@ -177,8 +177,8 @@ func (w *Writer) writeMultiFile(db *models.Database) error {
// Add bun import
templateData.AddImport(fmt.Sprintf("\"%s\"", w.typeMapper.GetBunImport()))
// Add resolvespec_common import
templateData.AddImport(fmt.Sprintf("resolvespec_common \"%s\"", w.typeMapper.GetSQLTypesImport()))
// Add nullable types import (resolvespec or stdlib depending on options)
templateData.AddImport(w.typeMapper.GetNullableTypeImportLine())
// Create model data
modelData := NewModelData(table, schema.Name, w.typeMapper, w.options.FlattenSchema)
@@ -195,8 +195,8 @@ func (w *Writer) writeMultiFile(db *models.Database) error {
}
}
// Add fmt import if GetIDStr is enabled
if w.config.GenerateGetIDStr {
// Add fmt import when generated helper methods need string formatting.
if w.needsFmtImport(templateData.Models) {
templateData.AddImport("\"fmt\"")
}
@@ -301,6 +301,26 @@ func (w *Writer) addRelationshipFields(modelData *ModelData, table *models.Table
}
}
// needsFmtImport reports whether any generated helper method for the given
// models formats an ID with fmt.Sprintf, which happens for non-string primary
// keys: GetIDStr on plain integer PKs, and UpdateID on SQL wrapper PKs.
func (w *Writer) needsFmtImport(models []*ModelData) bool {
	for _, m := range models {
		// String-like or absent primary keys never need fmt.
		if m.PrimaryKeyField == "" || m.PrimaryKeyIsStr {
			continue
		}
		if w.config.GenerateGetIDStr && !m.PrimaryKeyIsSQL {
			return true
		}
		if w.config.GenerateUpdateID && m.PrimaryKeyIsSQL {
			return true
		}
	}
	return false
}
// findTable finds a table by schema and name in the database
func (w *Writer) findTable(schemaName, tableName string, db *models.Database) *models.Table {
for _, schema := range db.Schemas {

View File

@@ -556,7 +556,7 @@ func TestWriter_FieldNameCollision(t *testing.T) {
}
func TestTypeMapper_SQLTypeToGoType_Bun(t *testing.T) {
mapper := NewTypeMapper()
mapper := NewTypeMapper("")
tests := []struct {
sqlType string
@@ -574,6 +574,10 @@ func TestTypeMapper_SQLTypeToGoType_Bun(t *testing.T) {
{"boolean", false, "resolvespec_common.SqlBool"},
{"uuid", false, "resolvespec_common.SqlUUID"},
{"jsonb", false, "resolvespec_common.SqlJSONB"},
{"text[]", true, "resolvespec_common.SqlStringArray"},
{"text[]", false, "resolvespec_common.SqlStringArray"},
{"integer[]", true, "resolvespec_common.SqlInt32Array"},
{"bigint[]", false, "resolvespec_common.SqlInt64Array"},
}
for _, tt := range tests {
@@ -586,8 +590,118 @@ func TestTypeMapper_SQLTypeToGoType_Bun(t *testing.T) {
}
}
// TestWriter_UpdateIDTypeSafety_Bun verifies that the generated UpdateID
// helper always accepts an int64 parameter while converting it safely to the
// model's actual primary-key type (plain int conversions vs. FromString on
// SQL wrapper types), and never narrows to int32 where that would be lossy.
func TestWriter_UpdateIDTypeSafety_Bun(t *testing.T) {
	tests := []struct {
		name         string
		pkType       string // SQL column type of the primary key
		expectedPK   string // Go type expected on the generated struct field
		expectedLine string // assignment/conversion expected inside UpdateID
		forbidInt32  bool   // true when int32(newid) would be an unsafe narrowing
	}{
		{"int32_pk", "int", "int32", "m.ID = int32(newid)", false},
		{"sql_int16_pk", "smallint", "resolvespec_common.SqlInt16", "m.ID.FromString(fmt.Sprintf(\"%d\", newid))", true},
		{"int64_pk", "bigint", "int64", "m.ID = int64(newid)", true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Minimal single-column table whose only column is the PK.
			table := models.InitTable("test_table", "public")
			table.Columns["id"] = &models.Column{
				Name:         "id",
				Type:         tt.pkType,
				NotNull:      true,
				IsPrimaryKey: true,
			}
			tmpDir := t.TempDir()
			opts := &writers.WriterOptions{
				PackageName: "models",
				OutputPath:  filepath.Join(tmpDir, "test.go"),
			}
			writer := NewWriter(opts)
			err := writer.WriteTable(table)
			if err != nil {
				t.Fatalf("WriteTable failed: %v", err)
			}
			content, err := os.ReadFile(opts.OutputPath)
			if err != nil {
				t.Fatalf("Failed to read generated file: %v", err)
			}
			generated := string(content)
			// UpdateID's body must use the type-appropriate conversion.
			if !strings.Contains(generated, tt.expectedLine) {
				t.Errorf("Expected UpdateID to include %s\nGenerated:\n%s", tt.expectedLine, generated)
			}
			// The struct field must be declared with the expected Go type.
			if !strings.Contains(generated, "ID "+tt.expectedPK) {
				t.Errorf("Expected generated primary key field type %s\nGenerated:\n%s", tt.expectedPK, generated)
			}
			// int32(newid) would silently truncate 64-bit / wrapper PKs.
			if tt.forbidInt32 && strings.Contains(generated, "int32(newid)") {
				t.Errorf("UpdateID should not cast to int32 for %s type\nGenerated:\n%s", tt.pkType, generated)
			}
			// Regardless of PK type, the helper signature stays int64.
			if !strings.Contains(generated, "UpdateID(newid int64)") {
				t.Errorf("UpdateID should accept int64 parameter\nGenerated:\n%s", generated)
			}
		})
	}
}
// TestWriter_StringPrimaryKeyHelpers_Bun verifies that a uuid primary key
// produces string-based helper methods (GetID/SetID/UpdateID operate on
// strings, not int64) backed by the resolvespec SqlUUID wrapper type.
func TestWriter_StringPrimaryKeyHelpers_Bun(t *testing.T) {
	// Single-column table with a uuid PK — the string-like helper path.
	table := models.InitTable("accounts", "public")
	table.Columns["id"] = &models.Column{
		Name:         "id",
		Type:         "uuid",
		NotNull:      true,
		IsPrimaryKey: true,
	}
	tmpDir := t.TempDir()
	opts := &writers.WriterOptions{
		PackageName: "models",
		OutputPath:  filepath.Join(tmpDir, "test.go"),
	}
	writer := NewWriter(opts)
	err := writer.WriteTable(table)
	if err != nil {
		t.Fatalf("WriteTable failed: %v", err)
	}
	content, err := os.ReadFile(opts.OutputPath)
	if err != nil {
		t.Fatalf("Failed to read generated file: %v", err)
	}
	generated := string(content)
	// Every generated helper must use the string flavor for a uuid PK.
	expectations := []string{
		"resolvespec_common.SqlUUID",
		"func (m ModelPublicAccounts) GetID() string",
		"return m.ID.String()",
		"func (m ModelPublicAccounts) GetIDStr() string",
		"func (m ModelPublicAccounts) SetID(newid string)",
		"func (m *ModelPublicAccounts) UpdateID(newid string)",
		"m.ID.FromString(newid)",
	}
	for _, expected := range expectations {
		if !strings.Contains(generated, expected) {
			t.Errorf("Generated code missing expected content: %q\nGenerated:\n%s", expected, generated)
		}
	}
	// Negative check: int64 signatures must not leak into string-PK models.
	if strings.Contains(generated, "GetID() int64") || strings.Contains(generated, "UpdateID(newid int64)") {
		t.Errorf("String primary keys should not use int64 helper signatures\nGenerated:\n%s", generated)
	}
}
func TestTypeMapper_BuildBunTag(t *testing.T) {
mapper := NewTypeMapper()
mapper := NewTypeMapper("")
tests := []struct {
name string
@@ -685,6 +799,24 @@ func TestTypeMapper_BuildBunTag(t *testing.T) {
},
want: []string{"id,", "type:bigserial,", "pk,", "autoincrement,"},
},
{
name: "text array type",
column: &models.Column{
Name: "tags",
Type: "text[]",
NotNull: false,
},
want: []string{"tags,", "type:text[],"},
},
{
name: "integer array type",
column: &models.Column{
Name: "scores",
Type: "integer[]",
NotNull: true,
},
want: []string{"scores,", "type:integer[],"},
},
}
for _, tt := range tests {
@@ -695,6 +827,50 @@ func TestTypeMapper_BuildBunTag(t *testing.T) {
t.Errorf("BuildBunTag() = %q, missing %q", result, part)
}
}
// resolvespec mode must NOT add "array" — SqlXxxArray uses sql.Scanner
if strings.Contains(result, ",array,") || strings.HasSuffix(result, ",array,") {
t.Errorf("BuildBunTag() = %q, must not contain 'array' in resolvespec mode", result)
}
})
}
}
// TestTypeMapper_BuildBunTag_StdlibArrayHasArrayTag checks that stdlib-mode
// array columns carry Bun's "array" tag option: plain Go slices need Bun's
// native array handling, unlike resolvespec's sql.Scanner wrapper types.
func TestTypeMapper_BuildBunTag_StdlibArrayHasArrayTag(t *testing.T) {
	mapper := NewTypeMapper(writers.NullableTypeStdlib)
	testCases := []struct {
		name   string
		column *models.Column
	}{
		{
			name:   "text array",
			column: &models.Column{Name: "tags", Type: "text[]"},
		},
		{
			name:   "integer array",
			column: &models.Column{Name: "scores", Type: "integer[]", NotNull: true},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			tag := mapper.BuildBunTag(tc.column, nil)
			if !strings.Contains(tag, "array") {
				t.Errorf("BuildBunTag() = %q, expected 'array' in stdlib mode", tag)
			}
		})
	}
}
// TestTypeMapper_BuildBunTag_PreservesExplicitTypeModifiers ensures a column
// type that already carries a modifier (e.g. vector(1536)) is emitted as-is
// and never re-suffixed from Length/Precision into something like
// vector(1536)(1536).
func TestTypeMapper_BuildBunTag_PreservesExplicitTypeModifiers(t *testing.T) {
	mapper := NewTypeMapper("")
	column := &models.Column{
		Name:      "embedding",
		Type:      "vector(1536)",
		Length:    1536,
		Precision: 0,
		Scale:     0,
	}
	got := mapper.BuildBunTag(column, nil)
	if !strings.Contains(got, "type:vector(1536),") {
		t.Fatalf("expected explicit modifier to be preserved, got %q", got)
	}
	if strings.Contains(got, ")(") {
		t.Fatalf("type modifier appears duplicated in %q", got)
	}
}

View File

@@ -4,6 +4,7 @@ import (
"encoding/xml"
"fmt"
"os"
"sort"
"strings"
"github.com/google/uuid"
@@ -155,8 +156,15 @@ func (w *Writer) mapTableFields(table *models.Table) models.DCTXTable {
},
}
columnNames := make([]string, 0, len(table.Columns))
for name := range table.Columns {
columnNames = append(columnNames, name)
}
sort.Strings(columnNames)
i := 0
for _, column := range table.Columns {
for _, colName := range columnNames {
column := table.Columns[colName]
dctxTable.Fields[i] = w.mapField(column)
i++
}
@@ -165,12 +173,27 @@ func (w *Writer) mapTableFields(table *models.Table) models.DCTXTable {
}
// mapTableKeys converts the table's indexes into DCTX key entries.
//
// Fix: the visible body contained merged pre-refactor residue — `keys` was
// declared twice (a compile error) and a stale loop wrote through an unsorted
// map iteration. Only the deterministic post-sort version is kept.
func (w *Writer) mapTableKeys(table *models.Table) []models.DCTXKey {
	indexes := make([]*models.Index, 0, len(table.Indexes))
	for _, index := range table.Indexes {
		indexes = append(indexes, index)
	}
	// Stable ordering for deterministic output and test reproducibility:
	// primary keys first, then lexicographic by index name.
	sort.Slice(indexes, func(i, j int) bool {
		iPrimary := strings.HasSuffix(indexes[i].Name, "_pkey")
		jPrimary := strings.HasSuffix(indexes[j].Name, "_pkey")
		if iPrimary != jPrimary {
			return iPrimary
		}
		return indexes[i].Name < indexes[j].Name
	})
	keys := make([]models.DCTXKey, len(indexes))
	for i, index := range indexes {
		keys[i] = w.mapKey(index, table)
	}
	return keys
}

View File

@@ -5,6 +5,7 @@ import (
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
)
// TypeMapper handles SQL to Drizzle type conversions
@@ -18,7 +19,7 @@ func NewTypeMapper() *TypeMapper {
// SQLTypeToDrizzle converts SQL types to Drizzle column type functions
// Returns the Drizzle column constructor (e.g., "integer", "varchar", "text")
func (tm *TypeMapper) SQLTypeToDrizzle(sqlType string) string {
sqlTypeLower := strings.ToLower(sqlType)
sqlTypeLower := pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(sqlType))
// PostgreSQL type mapping to Drizzle
typeMap := map[string]string{
@@ -87,13 +88,6 @@ func (tm *TypeMapper) SQLTypeToDrizzle(sqlType string) string {
return drizzleType
}
// Check for partial matches (e.g., "varchar(255)" -> "varchar")
for sqlPattern, drizzleType := range typeMap {
if strings.HasPrefix(sqlTypeLower, sqlPattern) {
return drizzleType
}
}
// Default to text for unknown types
return "text"
}

View File

@@ -48,22 +48,23 @@ func main() {
### CLI Examples
```bash
# Generate GORM models from PostgreSQL database (single file)
relspec --input pgsql \
--conn "postgres://localhost/mydb" \
--output gorm \
--out-file models.go \
--package models
# Generate GORM models from a DBML schema (default: resolvespec types)
relspec convert --from dbml --from-path schema.dbml \
--to gorm --to-path models.go --package models
# Generate GORM models with multi-file output (one file per table)
relspec --input json \
--in-file schema.json \
--output gorm \
--out-file models/ \
--package models
# Use standard library database/sql nullable types instead of resolvespec
relspec convert --from dbml --from-path schema.dbml \
--to gorm --to-path models.go --package models \
--types stdlib
# Convert DBML to GORM models
relspec --input dbml --in-file schema.dbml --output gorm --out-file models.go
# Explicitly select resolvespec types (same as omitting --types)
relspec convert --from pgsql --from-conn "postgres://localhost/mydb" \
--to gorm --to-path models.go --package models \
--types resolvespec
# Multi-file output (one file per table)
relspec convert --from json --from-path schema.json \
--to gorm --to-path models/ --package models
```
## Output Modes
@@ -86,56 +87,84 @@ relspec --input pgsql --conn "..." --output gorm --out-file models/
Files are named: `sql_{schema}_{table}.go`
## Generated Code Examples
### Default — resolvespec types (`--types resolvespec`)
```go
package models
import (
"time"
sql_types "git.warky.dev/wdevs/sql_types"
sql_types "github.com/bitechdev/ResolveSpec/pkg/spectypes"
)
type ModelUser struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement" json:"id"`
Username string `gorm:"column:username;type:varchar(50);not null;uniqueIndex" json:"username"`
Email string `gorm:"column:email;type:varchar(100);not null" json:"email"`
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;not null;default:now()" json:"created_at"`
// Relationships
Pos []*ModelPost `gorm:"foreignKey:UserID;references:ID;constraint:OnDelete:CASCADE" json:"pos,omitempty"`
ID string `gorm:"column:id;type:uuid;primaryKey" json:"id"`
Username string `gorm:"column:username;type:text;not null" json:"username"`
Email sql_types.SqlString `gorm:"column:email;type:text" json:"email,omitempty"`
Tags sql_types.SqlStringArray `gorm:"column:tags;type:text[];not null;default:'{}'" json:"tags"`
CreatedAt sql_types.SqlTimeStamp `gorm:"column:created_at;type:timestamptz;not null;default:now()" json:"created_at"`
}
func (ModelUser) TableName() string {
return "public.users"
}
```
type ModelPost struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey" json:"id"`
UserID int64 `gorm:"column:user_id;type:bigint;not null" json:"user_id"`
Title string `gorm:"column:title;type:varchar(200);not null" json:"title"`
Content sql_types.SqlString `gorm:"column:content;type:text" json:"content,omitempty"`
### Standard library — `--types stdlib`
// Belongs to
Use *ModelUser `gorm:"foreignKey:UserID;references:ID" json:"use,omitempty"`
```go
package models
import (
"database/sql"
"time"
)
type ModelUser struct {
ID string `gorm:"column:id;type:uuid;primaryKey" json:"id"`
Username string `gorm:"column:username;type:text;not null" json:"username"`
Email sql.NullString `gorm:"column:email;type:text" json:"email,omitempty"`
Tags []string `gorm:"column:tags;type:text[];not null;default:'{}'" json:"tags"`
CreatedAt time.Time `gorm:"column:created_at;type:timestamptz;not null;default:now()" json:"created_at"`
}
func (ModelPost) TableName() string {
return "public.posts"
func (ModelUser) TableName() string {
return "public.users"
}
```
## Writer Options
### NullableTypes
Controls which Go package is used for nullable column types. Set via the `--types` CLI flag or `WriterOptions.NullableTypes`:
```go
// Use resolvespec types (default — omit NullableTypes or set to "resolvespec")
options := &writers.WriterOptions{
OutputPath: "models.go",
PackageName: "models",
NullableTypes: writers.NullableTypeResolveSpec,
}
// Use standard library database/sql types
options := &writers.WriterOptions{
OutputPath: "models.go",
PackageName: "models",
NullableTypes: writers.NullableTypeStdlib,
}
```
### Metadata Options
Configure additional writer behavior using metadata in `WriterOptions`:
```go
options := &writers.WriterOptions{
OutputPath: "models.go",
PackageName: "models",
Metadata: map[string]interface{}{
Metadata: map[string]any{
"multi_file": true, // Enable multi-file mode
"populate_refs": true, // Populate RefDatabase/RefSchema
"generate_get_id_str": true, // Generate GetIDStr() methods
@@ -145,18 +174,23 @@ options := &writers.WriterOptions{
## Type Mapping
| SQL Type | Go Type | Notes |
|----------|---------|-------|
| bigint, int8 | int64 | - |
| integer, int, int4 | int | - |
| smallint, int2 | int16 | - |
| varchar, text | string | Not nullable |
| varchar, text (nullable) | sql_types.SqlString | Nullable |
| boolean, bool | bool | - |
| timestamp, timestamptz | time.Time | - |
| numeric, decimal | float64 | - |
| uuid | string | - |
| json, jsonb | string | - |
The nullable type package is selected with `--types` (or `WriterOptions.NullableTypes`).
| SQL Type | NOT NULL — both | Nullable — resolvespec | Nullable — stdlib |
|---|---|---|---|
| `bigint` | `int64` | `SqlInt64` | `sql.NullInt64` |
| `integer` | `int32` | `SqlInt32` | `sql.NullInt32` |
| `smallint` | `int16` | `SqlInt16` | `sql.NullInt16` |
| `text`, `varchar` | `string` | `SqlString` | `sql.NullString` |
| `boolean` | `bool` | `SqlBool` | `sql.NullBool` |
| `timestamp`, `timestamptz` | `time.Time` | `SqlTimeStamp` | `sql.NullTime` |
| `numeric`, `decimal` | `float64` | `SqlFloat64` | `sql.NullFloat64` |
| `uuid` | `string` | `SqlUUID` | `sql.NullString` |
| `jsonb` | `string` | `SqlString` | `sql.NullString` |
| `text[]` | `SqlStringArray` | `SqlStringArray` | `[]string` |
| `integer[]` | `SqlInt32Array` | `SqlInt32Array` | `[]int32` |
| `uuid[]` | `SqlUUIDArray` | `SqlUUIDArray` | `[]string` |
| `vector` | `SqlVector` | `SqlVector` | `[]float32` |
## Relationship Generation
@@ -170,7 +204,8 @@ The writer automatically generates relationship fields:
## Notes
- Model names are prefixed with "Model" (e.g., `ModelUser`)
- Nullable columns use `sql_types.SqlString`, `sql_types.SqlInt64`, etc. by default; pass `--types stdlib` to use `sql.NullString`, `sql.NullInt64`, etc. instead
- Array columns use `sql_types.SqlStringArray`, `sql_types.SqlInt32Array`, etc. by default; `--types stdlib` produces plain Go slices (`[]string`, `[]int32`, …)
- Generated code is auto-formatted with `go fmt`
- JSON tags are automatically added
- Supports schema-qualified table names in `TableName()` method

View File

@@ -2,6 +2,7 @@ package gorm
import (
"sort"
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
@@ -26,6 +27,9 @@ type ModelData struct {
Config *MethodConfig
PrimaryKeyField string // Name of the primary key field
PrimaryKeyType string // Go type of the primary key field
PrimaryKeyIsSQL bool // Whether PK uses a SQL wrapper type
PrimaryKeyIsStr bool // Whether helper methods should use string IDs
PrimaryKeyIDType string // Helper method GetID/SetID/UpdateID type
IDColumnName string // Name of the ID column in database
Prefix string // 3-letter prefix
}
@@ -136,7 +140,14 @@ func NewModelData(table *models.Table, schema string, typeMapper *TypeMapper, fl
// Sanitize column name to remove backticks
safeName := writers.SanitizeStructTagValue(col.Name)
model.PrimaryKeyField = SnakeCaseToPascalCase(safeName)
model.PrimaryKeyType = typeMapper.SQLTypeToGoType(col.Type, col.NotNull)
goType := typeMapper.SQLTypeToGoType(col.Type, col.NotNull)
model.PrimaryKeyType = goType
model.PrimaryKeyIsSQL = strings.Contains(goType, "sql_types.") || strings.Contains(goType, "sql.")
model.PrimaryKeyIsStr = isStringLikePrimaryKeyType(goType)
model.PrimaryKeyIDType = "int64"
if model.PrimaryKeyIsStr {
model.PrimaryKeyIDType = "string"
}
model.IDColumnName = safeName
break
}
@@ -189,6 +200,15 @@ func formatComment(description, comment string) string {
return comment
}
// isStringLikePrimaryKeyType reports whether the generated Go type for a
// primary key is handled as a string by the ID helper methods
// (GetID/SetID/UpdateID take string rather than int64).
func isStringLikePrimaryKeyType(goType string) bool {
	stringLike := map[string]struct{}{
		"string":              {},
		"sql.NullString":      {},
		"sql_types.SqlString": {},
		"sql_types.SqlUUID":   {},
	}
	_, ok := stringLike[goType]
	return ok
}
// resolveFieldNameCollision checks if a field name conflicts with generated method names
// and adds an underscore suffix if there's a collision
func resolveFieldNameCollision(fieldName string) string {

View File

@@ -43,26 +43,56 @@ func (m {{.Name}}) SchemaName() string {
{{end}}
{{if and .Config.GenerateGetID .PrimaryKeyField}}
// GetID returns the primary key value
func (m {{.Name}}) GetID() int64 {
func (m {{.Name}}) GetID() {{.PrimaryKeyIDType}} {
{{if .PrimaryKeyIsSQL -}}
{{if .PrimaryKeyIsStr -}}
return m.{{.PrimaryKeyField}}.String()
{{- else -}}
return m.{{.PrimaryKeyField}}.Int64()
{{- end}}
{{- else -}}
{{if .PrimaryKeyIsStr -}}
return m.{{.PrimaryKeyField}}
{{- else -}}
return int64(m.{{.PrimaryKeyField}})
{{- end}}
{{- end}}
}
{{end}}
{{if and .Config.GenerateGetIDStr .PrimaryKeyField}}
// GetIDStr returns the primary key as a string
func (m {{.Name}}) GetIDStr() string {
{{if .PrimaryKeyIsSQL -}}
return m.{{.PrimaryKeyField}}.String()
{{- else if .PrimaryKeyIsStr -}}
return m.{{.PrimaryKeyField}}
{{- else -}}
return fmt.Sprintf("%d", m.{{.PrimaryKeyField}})
{{- end}}
}
{{end}}
{{if and .Config.GenerateSetID .PrimaryKeyField}}
// SetID sets the primary key value
func (m {{.Name}}) SetID(newid int64) {
func (m {{.Name}}) SetID(newid {{.PrimaryKeyIDType}}) {
m.UpdateID(newid)
}
{{end}}
{{if and .Config.GenerateUpdateID .PrimaryKeyField}}
// UpdateID updates the primary key value
func (m *{{.Name}}) UpdateID(newid int64) {
func (m *{{.Name}}) UpdateID(newid {{.PrimaryKeyIDType}}) {
{{if .PrimaryKeyIsSQL -}}
{{if .PrimaryKeyIsStr -}}
m.{{.PrimaryKeyField}}.FromString(newid)
{{- else -}}
m.{{.PrimaryKeyField}}.FromString(fmt.Sprintf("%d", newid))
{{- end}}
{{- else -}}
{{if .PrimaryKeyIsStr -}}
m.{{.PrimaryKeyField}} = newid
{{- else -}}
m.{{.PrimaryKeyField}} = {{.PrimaryKeyType}}(newid)
{{- end}}
{{- end}}
}
{{end}}
{{if and .Config.GenerateGetIDName .IDColumnName}}

View File

@@ -5,48 +5,56 @@ import (
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
)
// TypeMapper handles type conversions between SQL and Go types.
type TypeMapper struct {
	// sqlTypesAlias is the package alias used when emitting resolvespec
	// wrapper types (e.g. "sql_types.SqlString").
	sqlTypesAlias string
	// typeStyle selects the nullable-type flavor:
	// writers.NullableTypeResolveSpec | writers.NullableTypeStdlib
	typeStyle string
}
// NewTypeMapper creates a new TypeMapper with default settings
func NewTypeMapper() *TypeMapper {
// NewTypeMapper creates a new TypeMapper.
// typeStyle should be writers.NullableTypeResolveSpec or writers.NullableTypeStdlib;
// an empty string defaults to resolvespec.
func NewTypeMapper(typeStyle string) *TypeMapper {
if typeStyle == "" {
typeStyle = writers.NullableTypeResolveSpec
}
return &TypeMapper{
sqlTypesAlias: "sql_types",
typeStyle: typeStyle,
}
}
// SQLTypeToGoType converts a SQL type to its Go equivalent.
// notNull selects the plain Go type; nullable columns get a wrapper type
// from either the resolvespec or database/sql package depending on typeStyle.
func (tm *TypeMapper) SQLTypeToGoType(sqlType string, notNull bool) string {
	// Array types are handled separately for both styles.
	if pgsql.IsArrayType(sqlType) {
		return tm.arrayGoType(tm.extractBaseType(sqlType))
	}
	baseType := tm.extractBaseType(sqlType)
	if tm.typeStyle == writers.NullableTypeStdlib {
		if notNull {
			return tm.rawGoType(baseType)
		}
		return tm.stdlibNullableGoType(baseType)
	}
	// resolvespec (default)
	if notNull {
		return tm.baseGoType(baseType)
	}
	// For nullable fields, use sql_types wrapper types.
	return tm.nullableGoType(baseType)
}
// extractBaseType extracts the canonical lowercase base type from a SQL type
// string. Examples: varchar(100) → varchar, numeric(10,2) → numeric.
//
// Fix: the visible body retained the stale pre-refactor implementation
// (manual ToLower + '(' truncation) whose early return made the
// pgsql.CanonicalizeBaseType call unreachable dead code; only the
// canonicalizing version is kept.
func (tm *TypeMapper) extractBaseType(sqlType string) string {
	return pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(sqlType))
}
// baseGoType returns the base Go type for a SQL type (not null)
@@ -112,6 +120,9 @@ func (tm *TypeMapper) baseGoType(sqlType string) string {
// Other
"money": "float64",
// pgvector — always uses SqlVector even when NOT NULL
"vector": tm.sqlTypesAlias + ".SqlVector",
}
if goType, ok := typeMap[sqlType]; ok {
@@ -185,6 +196,9 @@ func (tm *TypeMapper) nullableGoType(sqlType string) string {
// Other
"money": tm.sqlTypesAlias + ".SqlFloat64",
// pgvector
"vector": tm.sqlTypesAlias + ".SqlVector",
}
if goType, ok := typeMap[sqlType]; ok {
@@ -195,6 +209,123 @@ func (tm *TypeMapper) nullableGoType(sqlType string) string {
return tm.sqlTypesAlias + ".SqlString"
}
// arrayGoType returns the Go type for a PostgreSQL array column.
// The baseElemType is the canonical base type (e.g. "text", "integer").
// Unknown element types fall back to the string-array wrapper.
func (tm *TypeMapper) arrayGoType(baseElemType string) string {
	if tm.typeStyle == writers.NullableTypeStdlib {
		return tm.stdlibArrayGoType(baseElemType)
	}
	// resolvespec mode: pick the wrapper type name, then qualify it once.
	var name string
	switch baseElemType {
	case "text", "varchar", "char", "character", "citext", "bpchar",
		"inet", "cidr", "macaddr", "json", "jsonb":
		name = "SqlStringArray"
	case "integer", "int", "int4", "serial":
		name = "SqlInt32Array"
	case "smallint", "int2", "smallserial":
		name = "SqlInt16Array"
	case "bigint", "int8", "bigserial":
		name = "SqlInt64Array"
	case "real", "float4":
		name = "SqlFloat32Array"
	case "double precision", "float8", "numeric", "decimal", "money":
		name = "SqlFloat64Array"
	case "boolean", "bool":
		name = "SqlBoolArray"
	case "uuid":
		name = "SqlUUIDArray"
	default:
		name = "SqlStringArray"
	}
	return tm.sqlTypesAlias + "." + name
}
// rawGoType returns the plain Go type for a NOT NULL column in stdlib mode.
// Unknown SQL types fall back to string.
func (tm *TypeMapper) rawGoType(sqlType string) string {
	switch sqlType {
	case "integer", "int", "int4", "serial":
		return "int32"
	case "smallint", "int2", "smallserial":
		return "int16"
	case "bigint", "int8", "bigserial":
		return "int64"
	case "boolean", "bool":
		return "bool"
	case "real", "float4":
		return "float32"
	case "double precision", "float8", "numeric", "decimal", "money":
		return "float64"
	case "text", "varchar", "char", "character", "citext", "bpchar",
		"inet", "cidr", "macaddr", "uuid", "json", "jsonb":
		return "string"
	case "timestamp", "timestamp without time zone", "timestamp with time zone",
		"timestamptz", "date", "time", "time without time zone",
		"time with time zone", "timetz":
		return "time.Time"
	case "bytea":
		return "[]byte"
	case "vector":
		// pgvector columns scan into a float32 slice in stdlib mode.
		return "[]float32"
	default:
		return "string"
	}
}
// stdlibNullableGoType returns the database/sql nullable type for a column.
// Unknown SQL types fall back to sql.NullString.
func (tm *TypeMapper) stdlibNullableGoType(sqlType string) string {
	switch sqlType {
	case "integer", "int", "int4", "serial":
		return "sql.NullInt32"
	case "smallint", "int2", "smallserial":
		return "sql.NullInt16"
	case "bigint", "int8", "bigserial":
		return "sql.NullInt64"
	case "boolean", "bool":
		return "sql.NullBool"
	case "real", "float4", "double precision", "float8", "numeric", "decimal", "money":
		return "sql.NullFloat64"
	case "text", "varchar", "char", "character", "citext", "bpchar",
		"inet", "cidr", "macaddr", "uuid", "json", "jsonb":
		return "sql.NullString"
	case "timestamp", "timestamp without time zone", "timestamp with time zone",
		"timestamptz", "date", "time", "time without time zone",
		"time with time zone", "timetz":
		return "sql.NullTime"
	case "bytea":
		// Byte slices are nullable by nature (nil slice).
		return "[]byte"
	case "vector":
		return "[]float32"
	default:
		return "sql.NullString"
	}
}
// stdlibArrayGoType returns a plain Go slice type for array columns in stdlib
// mode. Unrecognized element types map to []string.
func (tm *TypeMapper) stdlibArrayGoType(baseElemType string) string {
	switch baseElemType {
	case "text", "varchar", "char", "character", "citext", "bpchar",
		"inet", "cidr", "macaddr", "uuid", "json", "jsonb":
		return "[]string"
	case "integer", "int", "int4", "serial":
		return "[]int32"
	case "smallint", "int2", "smallserial":
		return "[]int16"
	case "bigint", "int8", "bigserial":
		return "[]int64"
	case "real", "float4":
		return "[]float32"
	case "double precision", "float8", "numeric", "decimal", "money":
		return "[]float64"
	case "boolean", "bool":
		return "[]bool"
	default:
		return "[]string"
	}
}
// BuildGormTag generates a complete GORM tag string for a column
func (tm *TypeMapper) BuildGormTag(column *models.Column, table *models.Table) string {
var parts []string
@@ -209,9 +340,10 @@ func (tm *TypeMapper) BuildGormTag(column *models.Column, table *models.Table) s
// Include length, precision, scale if present
// Sanitize type to remove backticks
typeStr := writers.SanitizeStructTagValue(column.Type)
if column.Length > 0 {
hasExplicitTypeModifier := pgsql.HasExplicitTypeModifier(typeStr)
if !hasExplicitTypeModifier && column.Length > 0 {
typeStr = fmt.Sprintf("%s(%d)", typeStr, column.Length)
} else if column.Precision > 0 {
} else if !hasExplicitTypeModifier && column.Precision > 0 {
if column.Scale > 0 {
typeStr = fmt.Sprintf("%s(%d,%d)", typeStr, column.Precision, column.Scale)
} else {
@@ -335,7 +467,16 @@ func (tm *TypeMapper) NeedsFmtImport(generateGetIDStr bool) bool {
return generateGetIDStr
}
// GetSQLTypesImport returns the import path for the ResolveSpec spectypes
// package, which provides the non-stdlib nullable column types.
func (tm *TypeMapper) GetSQLTypesImport() string {
	return "github.com/bitechdev/ResolveSpec/pkg/spectypes"
}
// GetNullableTypeImportLine builds the Go import line (ready to pass to
// AddImport) for the package providing nullable column types: database/sql in
// stdlib mode, otherwise the aliased ResolveSpec spectypes package.
func (tm *TypeMapper) GetNullableTypeImportLine() string {
	if tm.typeStyle != writers.NullableTypeStdlib {
		return fmt.Sprintf("%s \"%s\"", tm.sqlTypesAlias, tm.GetSQLTypesImport())
	}
	return "\"database/sql\""
}

View File

@@ -24,7 +24,7 @@ type Writer struct {
func NewWriter(options *writers.WriterOptions) *Writer {
w := &Writer{
options: options,
typeMapper: NewTypeMapper(),
typeMapper: NewTypeMapper(options.NullableTypes),
config: LoadMethodConfigFromMetadata(options.Metadata),
}
@@ -77,8 +77,8 @@ func (w *Writer) writeSingleFile(db *models.Database) error {
packageName := w.getPackageName()
templateData := NewTemplateData(packageName, w.config)
// Add sql_types import (always needed for nullable types)
templateData.AddImport(fmt.Sprintf("sql_types \"%s\"", w.typeMapper.GetSQLTypesImport()))
// Add nullable types import (resolvespec or stdlib depending on options)
templateData.AddImport(w.typeMapper.GetNullableTypeImportLine())
// Collect all models
for _, schema := range db.Schemas {
@@ -99,8 +99,8 @@ func (w *Writer) writeSingleFile(db *models.Database) error {
}
}
// Add fmt import if GetIDStr is enabled
if w.config.GenerateGetIDStr {
// Add fmt import when generated helper methods need string formatting.
if w.needsFmtImport(templateData.Models) {
templateData.AddImport("\"fmt\"")
}
@@ -171,8 +171,8 @@ func (w *Writer) writeMultiFile(db *models.Database) error {
// Create template data for this single table
templateData := NewTemplateData(packageName, w.config)
// Add sql_types import
templateData.AddImport(fmt.Sprintf("sql_types \"%s\"", w.typeMapper.GetSQLTypesImport()))
// Add nullable types import (resolvespec or stdlib depending on options)
templateData.AddImport(w.typeMapper.GetNullableTypeImportLine())
// Create model data
modelData := NewModelData(table, schema.Name, w.typeMapper, w.options.FlattenSchema)
@@ -189,8 +189,8 @@ func (w *Writer) writeMultiFile(db *models.Database) error {
}
}
// Add fmt import if GetIDStr is enabled
if w.config.GenerateGetIDStr {
// Add fmt import when generated helper methods need string formatting.
if w.needsFmtImport(templateData.Models) {
templateData.AddImport("\"fmt\"")
}
@@ -295,6 +295,26 @@ func (w *Writer) addRelationshipFields(modelData *ModelData, table *models.Table
}
}
// needsFmtImport reports whether any of the given models will generate a
// helper method that needs the "fmt" import: GetIDStr for non-SQL, non-string
// primary keys, or UpdateID for SQL-typed, non-string primary keys.
func (w *Writer) needsFmtImport(models []*ModelData) bool {
	for _, model := range models {
		// Models without a primary key, or with a string key, never format.
		if model.PrimaryKeyField == "" || model.PrimaryKeyIsStr {
			continue
		}
		if w.config.GenerateGetIDStr && !model.PrimaryKeyIsSQL {
			return true
		}
		if w.config.GenerateUpdateID && model.PrimaryKeyIsSQL {
			return true
		}
	}
	return false
}
// findTable finds a table by schema and name in the database
func (w *Writer) findTable(schemaName, tableName string, db *models.Database) *models.Table {
for _, schema := range db.Schemas {

View File

@@ -598,6 +598,55 @@ func TestWriter_UpdateIDTypeSafety(t *testing.T) {
}
}
// TestWriter_StringPrimaryKeyHelpers_Gorm verifies that a uuid primary key
// produces string-typed helper methods (GetID/GetIDStr/SetID/UpdateID) in the
// generated model, and that the int64 signatures used for numeric keys do not
// appear.
func TestWriter_StringPrimaryKeyHelpers_Gorm(t *testing.T) {
	table := models.InitTable("accounts", "public")
	table.Columns["id"] = &models.Column{
		Name:         "id",
		Type:         "uuid",
		NotNull:      true,
		IsPrimaryKey: true,
	}

	tmpDir := t.TempDir()
	opts := &writers.WriterOptions{
		PackageName: "models",
		OutputPath:  filepath.Join(tmpDir, "test.go"),
	}
	writer := NewWriter(opts)

	err := writer.WriteTable(table)
	if err != nil {
		t.Fatalf("WriteTable failed: %v", err)
	}

	content, err := os.ReadFile(opts.OutputPath)
	if err != nil {
		t.Fatalf("Failed to read generated file: %v", err)
	}
	generated := string(content)

	// Every helper must carry the string signature and touch m.ID directly.
	expectations := []string{
		"ID string",
		"func (m ModelPublicAccounts) GetID() string",
		"return m.ID",
		"func (m ModelPublicAccounts) GetIDStr() string",
		"func (m ModelPublicAccounts) SetID(newid string)",
		"func (m *ModelPublicAccounts) UpdateID(newid string)",
		"m.ID = newid",
	}
	for _, expected := range expectations {
		if !strings.Contains(generated, expected) {
			t.Errorf("Generated code missing expected content: %q\nGenerated:\n%s", expected, generated)
		}
	}

	// Numeric helper signatures must not leak in for string keys.
	if strings.Contains(generated, "GetID() int64") || strings.Contains(generated, "UpdateID(newid int64)") {
		t.Errorf("String primary keys should not use int64 helper signatures\nGenerated:\n%s", generated)
	}
}
func TestNameConverter_SnakeCaseToPascalCase(t *testing.T) {
tests := []struct {
input string
@@ -643,7 +692,7 @@ func TestNameConverter_Pluralize(t *testing.T) {
}
func TestTypeMapper_SQLTypeToGoType(t *testing.T) {
mapper := NewTypeMapper()
mapper := NewTypeMapper("")
tests := []struct {
sqlType string
@@ -658,6 +707,10 @@ func TestTypeMapper_SQLTypeToGoType(t *testing.T) {
{"timestamp", false, "sql_types.SqlTimeStamp"},
{"boolean", true, "bool"},
{"boolean", false, "sql_types.SqlBool"},
{"text[]", true, "sql_types.SqlStringArray"},
{"text[]", false, "sql_types.SqlStringArray"},
{"integer[]", true, "sql_types.SqlInt32Array"},
{"bigint[]", false, "sql_types.SqlInt64Array"},
}
for _, tt := range tests {
@@ -669,3 +722,38 @@ func TestTypeMapper_SQLTypeToGoType(t *testing.T) {
})
}
}
// TestTypeMapper_BuildGormTag_ArrayType verifies that a PostgreSQL array type
// such as text[] survives GORM tag generation unchanged.
func TestTypeMapper_BuildGormTag_ArrayType(t *testing.T) {
	m := NewTypeMapper("")
	column := &models.Column{
		Name:    "tags",
		Type:    "text[]",
		NotNull: false,
	}

	got := m.BuildGormTag(column, nil)
	if !strings.Contains(got, "type:text[]") {
		t.Fatalf("expected array type to be preserved, got %q", got)
	}
}
// TestTypeMapper_BuildGormTag_PreservesExplicitTypeModifiers verifies that a
// type already carrying a modifier (vector(1536)) is kept as-is: the tag
// builder must neither strip the modifier nor append a second one derived
// from the column's Length metadata.
func TestTypeMapper_BuildGormTag_PreservesExplicitTypeModifiers(t *testing.T) {
	m := NewTypeMapper("")
	column := &models.Column{
		Name:      "embedding",
		Type:      "vector(1536)",
		Length:    1536,
		Precision: 0,
		Scale:     0,
	}

	got := m.BuildGormTag(column, nil)
	if !strings.Contains(got, "type:vector(1536)") {
		t.Fatalf("expected explicit modifier to be preserved, got %q", got)
	}
	if strings.Contains(got, ")(") {
		t.Fatalf("type modifier appears duplicated in %q", got)
	}
}

View File

@@ -4,6 +4,7 @@ import (
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
)
func (w *Writer) sqlTypeToGraphQL(sqlType string, column *models.Column, table *models.Table, schema *models.Schema) string {
@@ -33,12 +34,11 @@ func (w *Writer) sqlTypeToGraphQL(sqlType string, column *models.Column, table *
}
// Standard type mappings
baseType := strings.Split(sqlType, "(")[0] // Remove length/precision
baseType = strings.TrimSpace(baseType)
baseType := pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(sqlType))
// Handle array types
if strings.HasSuffix(baseType, "[]") {
elemType := strings.TrimSuffix(baseType, "[]")
if pgsql.IsArrayType(sqlType) {
elemType := pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(pgsql.ElementType(sqlType)))
gqlType := w.mapBaseTypeToGraphQL(elemType)
return "[" + gqlType + "]"
}
@@ -108,8 +108,7 @@ func (w *Writer) sqlTypeToCustomScalar(sqlType string) string {
"date": "Date",
}
baseType := strings.Split(sqlType, "(")[0]
baseType = strings.TrimSpace(baseType)
baseType := pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(sqlType))
if scalar, ok := scalarMap[baseType]; ok {
return scalar
@@ -132,8 +131,7 @@ func (w *Writer) isIntegerType(sqlType string) bool {
"smallserial": true,
}
baseType := strings.Split(sqlType, "(")[0]
baseType = strings.TrimSpace(baseType)
baseType := pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(sqlType))
return intTypes[baseType]
}

View File

@@ -31,6 +31,10 @@ type MigrationWriter struct {
// NewMigrationWriter creates a new templated migration writer
func NewMigrationWriter(options *writers.WriterOptions) (*MigrationWriter, error) {
if options == nil {
options = &writers.WriterOptions{}
}
executor, err := NewTemplateExecutor(options.FlattenSchema)
if err != nil {
return nil, fmt.Errorf("failed to create template executor: %w", err)
@@ -44,6 +48,16 @@ func NewMigrationWriter(options *writers.WriterOptions) (*MigrationWriter, error
// WriteMigration generates migration scripts using templates
func (w *MigrationWriter) WriteMigration(model *models.Database, current *models.Database) error {
if model == nil {
return fmt.Errorf("model database is required")
}
if w.options == nil {
w.options = &writers.WriterOptions{}
}
if current == nil {
current = models.InitDatabase(model.Name)
}
var writer io.Writer
var file *os.File
var err error
@@ -86,9 +100,16 @@ func (w *MigrationWriter) WriteMigration(model *models.Database, current *models
// Process each schema in the model
for _, modelSchema := range model.Schemas {
if modelSchema == nil {
continue
}
// Find corresponding schema in current database
var currentSchema *models.Schema
for _, cs := range current.Schemas {
if cs == nil {
continue
}
if strings.EqualFold(cs.Name, modelSchema.Name) {
currentSchema = cs
break
@@ -139,6 +160,17 @@ func (w *MigrationWriter) WriteMigration(model *models.Database, current *models
func (w *MigrationWriter) generateSchemaScripts(model *models.Schema, current *models.Schema) ([]MigrationScript, error) {
scripts := make([]MigrationScript, 0)
if schemaRequiresPGTrgm(model) {
scripts = append(scripts, MigrationScript{
ObjectName: "extension.pg_trgm",
ObjectType: "create extension",
Schema: model.Name,
Priority: 80,
Sequence: len(scripts),
Body: "CREATE EXTENSION IF NOT EXISTS pg_trgm;",
})
}
// Phase 1: Drop constraints and indexes that changed (Priority 11-50)
if current != nil {
dropScripts, err := w.generateDropScripts(model, current)
@@ -329,14 +361,18 @@ func (w *MigrationWriter) generateAlterTableScripts(schema *models.Schema, model
// Column doesn't exist, add it
defaultVal := ""
if modelCol.Default != nil {
if value, ok := modelCol.Default.(string); ok {
defaultVal = writers.QuoteDefaultValue(value, modelCol.Type)
} else {
defaultVal = fmt.Sprintf("%v", modelCol.Default)
}
}
sql, err := w.executor.ExecuteAddColumn(AddColumnData{
SchemaName: schema.Name,
TableName: modelTable.Name,
ColumnName: modelCol.Name,
ColumnType: pgsql.ConvertSQLType(modelCol.Type),
ColumnType: effectiveColumnSQLType(modelCol),
Default: defaultVal,
NotNull: modelCol.NotNull,
})
@@ -355,12 +391,13 @@ func (w *MigrationWriter) generateAlterTableScripts(schema *models.Schema, model
scripts = append(scripts, script)
} else if !columnsEqual(modelCol, currentCol) {
// Column exists but properties changed
if modelCol.Type != currentCol.Type {
if !columnTypesEqual(modelCol, currentCol) {
sql, err := w.executor.ExecuteAlterColumnType(AlterColumnTypeData{
SchemaName: schema.Name,
TableName: modelTable.Name,
ColumnName: modelCol.Name,
NewType: pgsql.ConvertSQLType(modelCol.Type),
NewType: effectiveAlterColumnSQLType(modelCol),
UsingExpr: buildAlterColumnUsingExpression(modelCol.Name, effectiveAlterColumnSQLType(modelCol)),
})
if err != nil {
return nil, err
@@ -382,8 +419,12 @@ func (w *MigrationWriter) generateAlterTableScripts(schema *models.Schema, model
setDefault := modelCol.Default != nil
defaultVal := ""
if setDefault {
if value, ok := modelCol.Default.(string); ok {
defaultVal = writers.QuoteDefaultValue(value, modelCol.Type)
} else {
defaultVal = fmt.Sprintf("%v", modelCol.Default)
}
}
sql, err := w.executor.ExecuteAlterColumnDefault(AlterColumnDefaultData{
SchemaName: schema.Name,
@@ -537,12 +578,17 @@ func (w *MigrationWriter) generateIndexScripts(model *models.Schema, current *mo
indexType = modelIndex.Type
}
columnExprs := buildIndexColumnExpressions(modelTable, modelIndex, indexType)
if len(columnExprs) == 0 {
continue
}
sql, err := w.executor.ExecuteCreateIndex(CreateIndexData{
SchemaName: model.Name,
TableName: modelTable.Name,
IndexName: indexName,
IndexType: indexType,
Columns: strings.Join(modelIndex.Columns, ", "),
Columns: strings.Join(columnExprs, ", "),
Unique: modelIndex.Unique,
})
if err != nil {
@@ -565,6 +611,26 @@ func (w *MigrationWriter) generateIndexScripts(model *models.Schema, current *mo
return scripts, nil
}
// buildIndexColumnExpressions renders the column expressions for a CREATE
// INDEX statement. Columns that resolve against the table are emitted via
// their SQL name; for GIN indexes a suitable operator class (looked up per
// column, honoring the index comment hint) is appended. Unresolved names are
// passed through verbatim.
func buildIndexColumnExpressions(table *models.Table, index *models.Index, indexType string) []string {
	isGin := strings.EqualFold(indexType, "gin")
	exprs := make([]string, 0, len(index.Columns))
	for _, name := range index.Columns {
		expr := name
		if table != nil {
			col, ok := resolveIndexColumn(table, name)
			if ok && col != nil {
				expr = col.SQLName()
				if isGin {
					if opClass := ginOperatorClassForColumn(col, index.Comment); opClass != "" {
						expr = col.SQLName() + " " + opClass
					}
				}
			}
		}
		exprs = append(exprs, expr)
	}
	return exprs
}
// generateForeignKeyScripts generates ADD CONSTRAINT FOREIGN KEY scripts using templates
func (w *MigrationWriter) generateForeignKeyScripts(model *models.Schema, current *models.Schema) ([]MigrationScript, error) {
scripts := make([]MigrationScript, 0)
@@ -820,11 +886,21 @@ func columnsEqual(col1, col2 *models.Column) bool {
if col1 == nil || col2 == nil {
return false
}
return strings.EqualFold(col1.Type, col2.Type) &&
return columnTypesEqual(col1, col2) &&
col1.NotNull == col2.NotNull &&
fmt.Sprintf("%v", col1.Default) == fmt.Sprintf("%v", col2.Default)
}
// columnTypesEqual reports whether two columns carry equivalent SQL types
// after their effective storage types are normalized (so equivalent spellings
// such as varchar vs character varying compare equal). Nil columns never
// compare equal.
func columnTypesEqual(col1, col2 *models.Column) bool {
	if col1 == nil || col2 == nil {
		return false
	}
	left := pgsql.NormalizeEquivalentSQLType(effectiveColumnSQLType(col1))
	right := pgsql.NormalizeEquivalentSQLType(effectiveColumnSQLType(col2))
	return strings.EqualFold(left, right)
}
// constraintsEqual checks if two constraints are equal
func constraintsEqual(c1, c2 *models.Constraint) bool {
if c1 == nil || c2 == nil {

View File

@@ -57,6 +57,410 @@ func TestWriteMigration_NewTable(t *testing.T) {
}
}
// TestWriteMigration_ArrayDefault verifies that a doubled-quoted array
// default (''{}'') on a new text[] column is normalized to '{}' in the
// generated migration instead of being quoted again.
func TestWriteMigration_ArrayDefault(t *testing.T) {
	// Current database: an empty public schema.
	current := models.InitDatabase("testdb")
	currentSchema := models.InitSchema("public")
	current.Schemas = append(current.Schemas, currentSchema)

	// Model: a plans table with a NOT NULL text[] column whose default still
	// carries doubled quotes from an upstream source.
	model := models.InitDatabase("testdb")
	modelSchema := models.InitSchema("public")
	table := models.InitTable("plans", "public")
	tagsCol := models.InitColumn("tags", "plans", "public")
	tagsCol.Type = "text[]"
	tagsCol.NotNull = true
	tagsCol.Default = "''{}''"
	table.Columns["tags"] = tagsCol
	modelSchema.Tables = append(modelSchema.Tables, table)
	model.Schemas = append(model.Schemas, modelSchema)

	var buf bytes.Buffer
	writer, err := NewMigrationWriter(&writers.WriterOptions{})
	if err != nil {
		t.Fatalf("Failed to create writer: %v", err)
	}
	writer.writer = &buf

	err = writer.WriteMigration(model, current)
	if err != nil {
		t.Fatalf("WriteMigration failed: %v", err)
	}

	output := buf.String()
	if !strings.Contains(output, "tags text[] DEFAULT '{}' NOT NULL") {
		t.Fatalf("expected normalized array default in migration, got:\n%s", output)
	}
	if strings.Contains(output, "'''{}'''") {
		t.Fatalf("migration still contains triple-quoted array default:\n%s", output)
	}
}
// TestWriteMigration_AltersColumnTypeWhenActualTypeDiffers verifies that when
// the live column type (jsonb) differs from the model type (text), the
// migration emits an ALTER COLUMN ... TYPE statement including a USING cast.
func TestWriteMigration_AltersColumnTypeWhenActualTypeDiffers(t *testing.T) {
	// Current database: learnings.details is jsonb.
	current := models.InitDatabase("testdb")
	currentSchema := models.InitSchema("public")
	currentTable := models.InitTable("learnings", "public")
	currentDetails := models.InitColumn("details", "learnings", "public")
	currentDetails.Type = "jsonb"
	currentTable.Columns["details"] = currentDetails
	currentSchema.Tables = append(currentSchema.Tables, currentTable)
	current.Schemas = append(current.Schemas, currentSchema)

	// Model: the same column declared as text.
	model := models.InitDatabase("testdb")
	modelSchema := models.InitSchema("public")
	modelTable := models.InitTable("learnings", "public")
	modelDetails := models.InitColumn("details", "learnings", "public")
	modelDetails.Type = "text"
	modelTable.Columns["details"] = modelDetails
	modelSchema.Tables = append(modelSchema.Tables, modelTable)
	model.Schemas = append(model.Schemas, modelSchema)

	var buf bytes.Buffer
	writer, err := NewMigrationWriter(&writers.WriterOptions{})
	if err != nil {
		t.Fatalf("Failed to create writer: %v", err)
	}
	writer.writer = &buf

	if err := writer.WriteMigration(model, current); err != nil {
		t.Fatalf("WriteMigration failed: %v", err)
	}

	output := buf.String()
	if !strings.Contains(output, "ALTER TABLE public.learnings") || !strings.Contains(output, "ALTER COLUMN details TYPE text") {
		t.Fatalf("expected migration to alter mismatched column type, got:\n%s", output)
	}
	if !strings.Contains(output, `ALTER COLUMN details TYPE text USING details::text;`) {
		t.Fatalf("expected migration type alter to include USING cast, got:\n%s", output)
	}
}
// TestWriteMigration_UsesStorageTypeForSerialAlterStatements verifies that
// altering a column to bigserial emits the underlying storage type (bigint)
// with a USING cast, since ALTER COLUMN ... TYPE bigserial is invalid SQL.
func TestWriteMigration_UsesStorageTypeForSerialAlterStatements(t *testing.T) {
	// Current database: learnings.id is uuid.
	current := models.InitDatabase("testdb")
	currentSchema := models.InitSchema("public")
	currentTable := models.InitTable("learnings", "public")
	currentID := models.InitColumn("id", "learnings", "public")
	currentID.Type = "uuid"
	currentTable.Columns["id"] = currentID
	currentSchema.Tables = append(currentSchema.Tables, currentTable)
	current.Schemas = append(current.Schemas, currentSchema)

	// Model: the same column declared as bigserial.
	model := models.InitDatabase("testdb")
	modelSchema := models.InitSchema("public")
	modelTable := models.InitTable("learnings", "public")
	modelID := models.InitColumn("id", "learnings", "public")
	modelID.Type = "bigserial"
	modelTable.Columns["id"] = modelID
	modelSchema.Tables = append(modelSchema.Tables, modelTable)
	model.Schemas = append(model.Schemas, modelSchema)

	var buf bytes.Buffer
	writer, err := NewMigrationWriter(&writers.WriterOptions{})
	if err != nil {
		t.Fatalf("Failed to create writer: %v", err)
	}
	writer.writer = &buf

	if err := writer.WriteMigration(model, current); err != nil {
		t.Fatalf("WriteMigration failed: %v", err)
	}

	output := buf.String()
	if !strings.Contains(output, "ALTER COLUMN id TYPE bigint") {
		t.Fatalf("expected serial alter to use bigint storage type, got:\n%s", output)
	}
	if strings.Contains(output, "ALTER COLUMN id TYPE bigserial;") {
		t.Fatalf("did not expect invalid bigserial alter statement, got:\n%s", output)
	}
	if !strings.Contains(output, `ALTER COLUMN id TYPE bigint USING id::bigint;`) {
		t.Fatalf("expected serial alter to include USING cast, got:\n%s", output)
	}
}
// TestWriteMigration_ArrayAlterIncludesUsingCast verifies that changing a
// column from text to text[] emits an ALTER with a USING tags::text[] cast.
func TestWriteMigration_ArrayAlterIncludesUsingCast(t *testing.T) {
	// Current database: learnings.tags is plain text.
	current := models.InitDatabase("testdb")
	currentSchema := models.InitSchema("public")
	currentTable := models.InitTable("learnings", "public")
	currentTags := models.InitColumn("tags", "learnings", "public")
	currentTags.Type = "text"
	currentTable.Columns["tags"] = currentTags
	currentSchema.Tables = append(currentSchema.Tables, currentTable)
	current.Schemas = append(current.Schemas, currentSchema)

	// Model: the same column declared as a text array.
	model := models.InitDatabase("testdb")
	modelSchema := models.InitSchema("public")
	modelTable := models.InitTable("learnings", "public")
	modelTags := models.InitColumn("tags", "learnings", "public")
	modelTags.Type = "text[]"
	modelTable.Columns["tags"] = modelTags
	modelSchema.Tables = append(modelSchema.Tables, modelTable)
	model.Schemas = append(model.Schemas, modelSchema)

	var buf bytes.Buffer
	writer, err := NewMigrationWriter(&writers.WriterOptions{})
	if err != nil {
		t.Fatalf("Failed to create writer: %v", err)
	}
	writer.writer = &buf

	if err := writer.WriteMigration(model, current); err != nil {
		t.Fatalf("WriteMigration failed: %v", err)
	}

	output := buf.String()
	if !strings.Contains(output, `ALTER COLUMN tags TYPE text[] USING tags::text[];`) {
		t.Fatalf("expected array alter to include USING cast, got:\n%s", output)
	}
}
// TestWriteMigration_DoesNotAlterEquivalentNormalizedColumnType verifies that
// "character varying" with Length 255 and "varchar(255)" are treated as the
// same type after normalization, so no ALTER COLUMN ... TYPE is emitted.
func TestWriteMigration_DoesNotAlterEquivalentNormalizedColumnType(t *testing.T) {
	// Current database: users.email is character varying with Length 255.
	current := models.InitDatabase("testdb")
	currentSchema := models.InitSchema("public")
	currentTable := models.InitTable("users", "public")
	currentEmail := models.InitColumn("email", "users", "public")
	currentEmail.Type = "character varying"
	currentEmail.Length = 255
	currentTable.Columns["email"] = currentEmail
	currentSchema.Tables = append(currentSchema.Tables, currentTable)
	current.Schemas = append(current.Schemas, currentSchema)

	// Model: the equivalent spelling varchar(255).
	model := models.InitDatabase("testdb")
	modelSchema := models.InitSchema("public")
	modelTable := models.InitTable("users", "public")
	modelEmail := models.InitColumn("email", "users", "public")
	modelEmail.Type = "varchar(255)"
	modelTable.Columns["email"] = modelEmail
	modelSchema.Tables = append(modelSchema.Tables, modelTable)
	model.Schemas = append(model.Schemas, modelSchema)

	var buf bytes.Buffer
	writer, err := NewMigrationWriter(&writers.WriterOptions{})
	if err != nil {
		t.Fatalf("Failed to create writer: %v", err)
	}
	writer.writer = &buf

	if err := writer.WriteMigration(model, current); err != nil {
		t.Fatalf("WriteMigration failed: %v", err)
	}

	output := buf.String()
	if strings.Contains(output, "ALTER COLUMN email TYPE") {
		t.Fatalf("did not expect alter type for equivalent normalized types, got:\n%s", output)
	}
}
// TestWriteMigration_GinIndexOnTextUsesTrigramOperatorClass verifies that a
// GIN index on a plain text column emits CREATE EXTENSION pg_trgm and uses
// the gin_trgm_ops operator class.
func TestWriteMigration_GinIndexOnTextUsesTrigramOperatorClass(t *testing.T) {
	// Current database: an empty public schema.
	current := models.InitDatabase("testdb")
	currentSchema := models.InitSchema("public")
	current.Schemas = append(current.Schemas, currentSchema)

	// Model: articles.title text with a GIN index on title.
	model := models.InitDatabase("testdb")
	modelSchema := models.InitSchema("public")
	table := models.InitTable("articles", "public")
	titleCol := models.InitColumn("title", "articles", "public")
	titleCol.Type = "text"
	table.Columns["title"] = titleCol
	index := &models.Index{
		Name:    "idx_articles_title_gin",
		Type:    "gin",
		Columns: []string{"title"},
	}
	table.Indexes[index.Name] = index
	modelSchema.Tables = append(modelSchema.Tables, table)
	model.Schemas = append(model.Schemas, modelSchema)

	var buf bytes.Buffer
	writer, err := NewMigrationWriter(&writers.WriterOptions{})
	if err != nil {
		t.Fatalf("Failed to create writer: %v", err)
	}
	writer.writer = &buf

	if err := writer.WriteMigration(model, current); err != nil {
		t.Fatalf("WriteMigration failed: %v", err)
	}

	output := buf.String()
	if !strings.Contains(output, "CREATE EXTENSION IF NOT EXISTS pg_trgm;") {
		t.Fatalf("expected trigram extension for text GIN migration index, got:\n%s", output)
	}
	if !strings.Contains(output, "USING gin (title gin_trgm_ops)") {
		t.Fatalf("expected GIN text index to include gin_trgm_ops, got:\n%s", output)
	}
}
// TestWriteMigration_GinIndexOnQuotedTextColumnUsesTrigramOperatorClass
// verifies that an index column given with surrounding quotes ("name") still
// resolves to the underlying text column and gets gin_trgm_ops.
func TestWriteMigration_GinIndexOnQuotedTextColumnUsesTrigramOperatorClass(t *testing.T) {
	// Current database: an empty public schema.
	current := models.InitDatabase("testdb")
	currentSchema := models.InitSchema("public")
	current.Schemas = append(current.Schemas, currentSchema)

	// Model: agent_personas.name text; index declares the column as `"name"`.
	model := models.InitDatabase("testdb")
	modelSchema := models.InitSchema("public")
	table := models.InitTable("agent_personas", "public")
	nameCol := models.InitColumn("name", "agent_personas", "public")
	nameCol.Type = "text"
	table.Columns["name"] = nameCol
	index := &models.Index{
		Name:    "idx_agent_personas_name_gin",
		Type:    "gin",
		Columns: []string{`"name"`},
	}
	table.Indexes[index.Name] = index
	modelSchema.Tables = append(modelSchema.Tables, table)
	model.Schemas = append(model.Schemas, modelSchema)

	var buf bytes.Buffer
	writer, err := NewMigrationWriter(&writers.WriterOptions{})
	if err != nil {
		t.Fatalf("Failed to create writer: %v", err)
	}
	writer.writer = &buf

	if err := writer.WriteMigration(model, current); err != nil {
		t.Fatalf("WriteMigration failed: %v", err)
	}

	output := buf.String()
	if !strings.Contains(output, "USING gin (name gin_trgm_ops)") {
		t.Fatalf("expected quoted text column GIN index to include gin_trgm_ops, got:\n%s", output)
	}
}
// TestWriteMigration_GinIndexOnTextArrayDoesNotUseTrigramOperatorClass
// verifies that a GIN index over a text[] column uses array_ops and never the
// trigram operator class (which is only valid for scalar text).
func TestWriteMigration_GinIndexOnTextArrayDoesNotUseTrigramOperatorClass(t *testing.T) {
	// Current database: an empty public schema.
	current := models.InitDatabase("testdb")
	currentSchema := models.InitSchema("public")
	current.Schemas = append(current.Schemas, currentSchema)

	// Model: plans.tags text[] with a GIN index on tags.
	model := models.InitDatabase("testdb")
	modelSchema := models.InitSchema("public")
	table := models.InitTable("plans", "public")
	tagsCol := models.InitColumn("tags", "plans", "public")
	tagsCol.Type = "text[]"
	table.Columns["tags"] = tagsCol
	index := &models.Index{
		Name:    "idx_plans_tags",
		Type:    "gin",
		Columns: []string{"tags"},
	}
	table.Indexes[index.Name] = index
	modelSchema.Tables = append(modelSchema.Tables, table)
	model.Schemas = append(model.Schemas, modelSchema)

	var buf bytes.Buffer
	writer, err := NewMigrationWriter(&writers.WriterOptions{})
	if err != nil {
		t.Fatalf("Failed to create writer: %v", err)
	}
	writer.writer = &buf

	if err := writer.WriteMigration(model, current); err != nil {
		t.Fatalf("WriteMigration failed: %v", err)
	}

	output := buf.String()
	if !strings.Contains(output, "USING gin (tags array_ops)") {
		t.Fatalf("expected GIN array index with array_ops, got:\n%s", output)
	}
	if strings.Contains(output, "gin_trgm_ops") {
		t.Fatalf("did not expect gin_trgm_ops for text[] migration index, got:\n%s", output)
	}
}
// TestWriteMigration_GinIndexOnJSONBUsesJSONBOperatorClass verifies that a
// GIN index over a jsonb column uses jsonb_ops and not the trigram class.
func TestWriteMigration_GinIndexOnJSONBUsesJSONBOperatorClass(t *testing.T) {
	// Current database: an empty public schema.
	current := models.InitDatabase("testdb")
	currentSchema := models.InitSchema("public")
	current.Schemas = append(current.Schemas, currentSchema)

	// Model: learnings.details jsonb with a GIN index on details.
	model := models.InitDatabase("testdb")
	modelSchema := models.InitSchema("public")
	table := models.InitTable("learnings", "public")
	detailsCol := models.InitColumn("details", "learnings", "public")
	detailsCol.Type = "jsonb"
	table.Columns["details"] = detailsCol
	index := &models.Index{
		Name:    "idx_learnings_details",
		Type:    "gin",
		Columns: []string{"details"},
	}
	table.Indexes[index.Name] = index
	modelSchema.Tables = append(modelSchema.Tables, table)
	model.Schemas = append(model.Schemas, modelSchema)

	var buf bytes.Buffer
	writer, err := NewMigrationWriter(&writers.WriterOptions{})
	if err != nil {
		t.Fatalf("Failed to create writer: %v", err)
	}
	writer.writer = &buf

	if err := writer.WriteMigration(model, current); err != nil {
		t.Fatalf("WriteMigration failed: %v", err)
	}

	output := buf.String()
	if !strings.Contains(output, "USING gin (details jsonb_ops)") {
		t.Fatalf("expected GIN jsonb index to include jsonb_ops, got:\n%s", output)
	}
	if strings.Contains(output, "gin_trgm_ops") {
		t.Fatalf("did not expect gin_trgm_ops for jsonb migration index, got:\n%s", output)
	}
}
// TestWriteMigration_GinIndexOnJSONBIgnoresIncompatibleTrigramOperatorClass
// verifies that a gin_trgm_ops hint in the index comment is ignored when the
// column is jsonb, falling back to the compatible jsonb_ops class.
func TestWriteMigration_GinIndexOnJSONBIgnoresIncompatibleTrigramOperatorClass(t *testing.T) {
	// Current database: an empty public schema.
	current := models.InitDatabase("testdb")
	currentSchema := models.InitSchema("public")
	current.Schemas = append(current.Schemas, currentSchema)

	// Model: jsonb column with a GIN index whose comment requests gin_trgm_ops.
	model := models.InitDatabase("testdb")
	modelSchema := models.InitSchema("public")
	table := models.InitTable("learnings", "public")
	detailsCol := models.InitColumn("details", "learnings", "public")
	detailsCol.Type = "jsonb"
	table.Columns["details"] = detailsCol
	index := &models.Index{
		Name:    "idx_learnings_details",
		Type:    "gin",
		Columns: []string{"details"},
		Comment: "gin_trgm_ops",
	}
	table.Indexes[index.Name] = index
	modelSchema.Tables = append(modelSchema.Tables, table)
	model.Schemas = append(model.Schemas, modelSchema)

	var buf bytes.Buffer
	writer, err := NewMigrationWriter(&writers.WriterOptions{})
	if err != nil {
		t.Fatalf("Failed to create writer: %v", err)
	}
	writer.writer = &buf

	if err := writer.WriteMigration(model, current); err != nil {
		t.Fatalf("WriteMigration failed: %v", err)
	}

	output := buf.String()
	if !strings.Contains(output, "USING gin (details jsonb_ops)") {
		t.Fatalf("expected incompatible trigram hint on jsonb to fall back to jsonb_ops, got:\n%s", output)
	}
}
func TestWriteMigration_WithAudit(t *testing.T) {
// Current database (empty)
current := models.InitDatabase("testdb")
@@ -282,3 +686,46 @@ func TestWriteMigration_NumericConstraintNames(t *testing.T) {
t.Error("Migration missing FOREIGN KEY")
}
}
// TestNewMigrationWriter_NilOptions verifies that constructing a migration
// writer with nil options succeeds and initializes default options.
func TestNewMigrationWriter_NilOptions(t *testing.T) {
	w, err := NewMigrationWriter(nil)
	if err != nil {
		t.Fatalf("NewMigrationWriter(nil) returned error: %v", err)
	}
	if w == nil {
		t.Fatal("expected writer instance")
	}
	if w.options == nil {
		t.Fatal("expected default writer options to be initialized")
	}
}
// TestWriteMigration_NilCurrentTreatsDatabaseAsEmpty verifies that a nil
// current database is handled like an empty one: the model table is emitted
// as a CREATE TABLE statement rather than causing an error.
func TestWriteMigration_NilCurrentTreatsDatabaseAsEmpty(t *testing.T) {
	model := models.InitDatabase("testdb")
	schema := models.InitSchema("public")
	usersTable := models.InitTable("users", "public")
	idCol := models.InitColumn("id", "users", "public")
	idCol.Type = "integer"
	idCol.NotNull = true
	usersTable.Columns["id"] = idCol
	schema.Tables = append(schema.Tables, usersTable)
	model.Schemas = append(model.Schemas, schema)

	var out bytes.Buffer
	w, err := NewMigrationWriter(nil)
	if err != nil {
		t.Fatalf("Failed to create writer: %v", err)
	}
	w.writer = &out

	if err := w.WriteMigration(model, nil); err != nil {
		t.Fatalf("WriteMigration with nil current failed: %v", err)
	}
	if !strings.Contains(out.String(), "CREATE TABLE") {
		t.Fatalf("expected CREATE TABLE in migration output, got:\n%s", out.String())
	}
}

View File

@@ -8,6 +8,7 @@ import (
"text/template"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
)
//go:embed templates/*.tmpl
@@ -94,6 +95,16 @@ type AlterColumnTypeData struct {
TableName string
ColumnName string
NewType string
UsingExpr string
}
// AlterColumnTypeWithCheckData contains data for the guarded alter-column-type
// template (alter_column_type_with_check.tmpl), which only alters the column
// when its live type is not already one of the equivalent spellings.
type AlterColumnTypeWithCheckData struct {
	SchemaName      string // target schema name
	TableName       string // target table name
	ColumnName      string // column whose type may be altered
	NewType         string // desired SQL type for the column
	EquivalentTypes string // comma-separated quoted type spellings treated as already-correct (spliced into ARRAY[...])
	UsingExpr       string // optional USING cast expression appended to the ALTER
}
// AlterColumnDefaultData contains data for alter column default template
@@ -266,6 +277,7 @@ type CreatePrimaryKeyWithAutoGenCheckData struct {
ConstraintName string
AutoGenNames string // Comma-separated list of names like "'name1', 'name2'"
Columns string
ColumnNames string // Comma-separated list of quoted column names like "'id', 'tenant_id'"
}
// Execute methods for each template
@@ -300,6 +312,15 @@ func (te *TemplateExecutor) ExecuteAlterColumnType(data AlterColumnTypeData) (st
return buf.String(), nil
}
// ExecuteAlterColumnTypeWithCheck renders the guarded ALTER COLUMN TYPE
// template, which changes the column type only when the live type is not one
// of the equivalent variants.
func (te *TemplateExecutor) ExecuteAlterColumnTypeWithCheck(data AlterColumnTypeWithCheckData) (string, error) {
	var out bytes.Buffer
	if err := te.templates.ExecuteTemplate(&out, "alter_column_type_with_check.tmpl", data); err != nil {
		return "", fmt.Errorf("failed to execute alter_column_type_with_check template: %w", err)
	}
	return out.String(), nil
}
// ExecuteAlterColumnDefault executes the alter column default template
func (te *TemplateExecutor) ExecuteAlterColumnDefault(data AlterColumnDefaultData) (string, error) {
var buf bytes.Buffer
@@ -495,8 +516,12 @@ func BuildCreateTableData(schemaName string, table *models.Table) CreateTableDat
NotNull: col.NotNull,
}
if col.Default != nil {
if value, ok := col.Default.(string); ok {
colData.Default = writers.QuoteDefaultValue(value, col.Type)
} else {
colData.Default = fmt.Sprintf("%v", col.Default)
}
}
columns = append(columns, colData)
}

View File

@@ -1,2 +1,2 @@
ALTER TABLE {{qual_table .SchemaName .TableName}}
ALTER COLUMN {{quote_ident .ColumnName}} TYPE {{.NewType}};
ALTER COLUMN {{quote_ident .ColumnName}} TYPE {{.NewType}}{{if .UsingExpr}} USING {{.UsingExpr}}{{end}};

View File

@@ -0,0 +1,22 @@
DO $$
DECLARE
    current_type text;
BEGIN
    -- Look up the column's live type as PostgreSQL formats it,
    -- e.g. "character varying(255)" (includes the type modifier).
    SELECT pg_catalog.format_type(a.atttypid, a.atttypmod)
    INTO current_type
    FROM pg_attribute a
    JOIN pg_class t ON t.oid = a.attrelid
    JOIN pg_namespace n ON n.oid = t.relnamespace
    WHERE n.nspname = '{{.SchemaName}}'
      AND t.relname = '{{.TableName}}'
      AND a.attname = '{{.ColumnName}}'
      AND a.attnum > 0
      AND NOT a.attisdropped;
    -- NULL means the table or column does not exist yet: skip the ALTER.
    -- Also skip when the current type already matches any accepted spelling.
    IF current_type IS NOT NULL
       AND current_type <> ALL(ARRAY[{{.EquivalentTypes}}]) THEN
        ALTER TABLE {{qual_table .SchemaName .TableName}}
            ALTER COLUMN {{quote_ident .ColumnName}} TYPE {{.NewType}}{{if .UsingExpr}} USING {{.UsingExpr}}{{end}};
    END IF;
END;
$$;

View File

@@ -1,26 +1,42 @@
DO $$
DECLARE
auto_pk_name text;
current_pk_name text;
current_pk_matches boolean := false;
BEGIN
-- Drop auto-generated primary key if it exists
SELECT constraint_name INTO auto_pk_name
FROM information_schema.table_constraints
WHERE table_schema = '{{.SchemaName}}'
AND table_name = '{{.TableName}}'
AND constraint_type = 'PRIMARY KEY'
AND constraint_name IN ({{.AutoGenNames}});
SELECT tc.constraint_name,
COALESCE(
ARRAY(
SELECT a.attname::text
FROM pg_constraint c
JOIN pg_class t ON t.oid = c.conrelid
JOIN pg_namespace n ON n.oid = t.relnamespace
JOIN unnest(c.conkey) WITH ORDINALITY AS cols(attnum, ord)
ON TRUE
JOIN pg_attribute a
ON a.attrelid = t.oid
AND a.attnum = cols.attnum
WHERE c.contype = 'p'
AND n.nspname = '{{.SchemaName}}'
AND t.relname = '{{.TableName}}'
ORDER BY cols.ord
),
ARRAY[]::text[]
) = ARRAY[{{.ColumnNames}}]
INTO current_pk_name, current_pk_matches
FROM information_schema.table_constraints tc
WHERE tc.table_schema = '{{.SchemaName}}'
AND tc.table_name = '{{.TableName}}'
AND tc.constraint_type = 'PRIMARY KEY';
IF auto_pk_name IS NOT NULL THEN
EXECUTE 'ALTER TABLE {{qual_table .SchemaName .TableName}} DROP CONSTRAINT ' || quote_ident(auto_pk_name);
IF current_pk_name IS NOT NULL
AND NOT current_pk_matches
AND current_pk_name IN ({{.AutoGenNames}}) THEN
EXECUTE 'ALTER TABLE {{qual_table .SchemaName .TableName}} DROP CONSTRAINT ' || quote_ident(current_pk_name);
END IF;
-- Add named primary key if it doesn't exist
IF NOT EXISTS (
SELECT 1 FROM information_schema.table_constraints
WHERE table_schema = '{{.SchemaName}}'
AND table_name = '{{.TableName}}'
AND constraint_name = '{{.ConstraintName}}'
) THEN
-- Add the desired primary key only when no matching primary key already exists.
IF current_pk_name IS NULL
OR (NOT current_pk_matches AND current_pk_name IN ({{.AutoGenNames}})) THEN
ALTER TABLE {{qual_table .SchemaName .TableName}} ADD CONSTRAINT {{quote_ident .ConstraintName}} PRIMARY KEY ({{.Columns}});
END IF;
END;

View File

@@ -10,8 +10,6 @@ import (
"strings"
"time"
"github.com/jackc/pgx/v5"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
@@ -145,6 +143,10 @@ func (w *Writer) GenerateSchemaStatements(schema *models.Schema) ([]string, erro
statements = append(statements, fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS %s", schema.SQLName()))
}
if schemaRequiresPGTrgm(schema) {
statements = append(statements, `CREATE EXTENSION IF NOT EXISTS pg_trgm`)
}
// Phase 2: Create sequences
for _, table := range schema.Tables {
pk := table.GetPrimaryKey()
@@ -183,6 +185,12 @@ func (w *Writer) GenerateSchemaStatements(schema *models.Schema) ([]string, erro
}
statements = append(statements, addColStmts...)
alterTypeStmts, err := w.GenerateAlterColumnTypeStatements(schema)
if err != nil {
return nil, fmt.Errorf("failed to generate alter column type statements: %w", err)
}
statements = append(statements, alterTypeStmts...)
// Phase 4: Primary keys
for _, table := range schema.Tables {
// First check for explicit PrimaryKeyConstraint
@@ -230,6 +238,7 @@ func (w *Writer) GenerateSchemaStatements(schema *models.Schema) ([]string, erro
ConstraintName: pkName,
AutoGenNames: formatStringList(autoGenPKNames),
Columns: strings.Join(pkColumns, ", "),
ColumnNames: formatStringList(pkColumns),
}
stmt, err := w.executor.ExecuteCreatePrimaryKeyWithAutoGenCheck(data)
@@ -262,16 +271,13 @@ func (w *Writer) GenerateSchemaStatements(schema *models.Schema) ([]string, erro
columnExprs := make([]string, 0, len(index.Columns))
for _, colName := range index.Columns {
colExpr := colName
if col, ok := table.Columns[colName]; ok {
// For GIN indexes on text columns, add operator class
if strings.EqualFold(indexType, "gin") && isTextType(col.Type) {
opClass := extractOperatorClass(index.Comment)
if opClass == "" {
opClass = "gin_trgm_ops"
}
if col, ok := resolveIndexColumn(table, colName); ok {
if strings.EqualFold(indexType, "gin") {
if opClass := ginOperatorClassForColumn(col, index.Comment); opClass != "" {
colExpr = fmt.Sprintf("%s %s", colName, opClass)
}
}
}
columnExprs = append(columnExprs, colExpr)
}
@@ -438,6 +444,33 @@ func (w *Writer) GenerateAddColumnStatements(schema *models.Schema) ([]string, e
return statements, nil
}
// GenerateAlterColumnTypeStatements generates guarded ALTER COLUMN ... TYPE
// statements for every column in the schema. Each statement is a DO block that
// only performs the alter when the column's live type differs from the desired
// type (or any equivalent spelling of it).
//
// The first element of the returned slice is always a "-- Alter column types"
// header comment; callers that print their own header (writeAlterColumnTypes)
// skip element 0.
func (w *Writer) GenerateAlterColumnTypeStatements(schema *models.Schema) ([]string, error) {
	statements := []string{}
	statements = append(statements, fmt.Sprintf("-- Alter column types for schema: %s", schema.Name))
	for _, table := range schema.Tables {
		// Iterate columns in sorted order for deterministic output.
		columns := getSortedColumns(table.Columns)
		for _, col := range columns {
			// Serial pseudo-types are mapped to their storage type
			// (e.g. bigserial -> bigint) since ALTER TYPE rejects serials.
			targetType := effectiveAlterColumnSQLType(col)
			stmt, err := w.executor.ExecuteAlterColumnTypeWithCheck(AlterColumnTypeWithCheckData{
				SchemaName:      schema.Name,
				TableName:       table.Name,
				ColumnName:      col.Name,
				NewType:         targetType,
				EquivalentTypes: equivalentTypeListSQL(targetType),
				UsingExpr:       buildAlterColumnUsingExpression(col.Name, targetType),
			})
			if err != nil {
				return nil, fmt.Errorf("failed to generate alter column type for %s.%s.%s: %w", schema.Name, table.Name, col.Name, err)
			}
			statements = append(statements, stmt)
		}
	}
	return statements, nil
}
// GenerateAddColumnsForDatabase generates ALTER TABLE ADD COLUMN statements for the entire database
func (w *Writer) GenerateAddColumnsForDatabase(db *models.Database) ([]string, error) {
statements := []string{}
@@ -490,30 +523,7 @@ func (w *Writer) generateCreateTableStatement(schema *models.Schema, table *mode
func (w *Writer) generateColumnDefinition(col *models.Column) string {
parts := []string{col.SQLName()}
// Type with length/precision - convert to valid PostgreSQL type
baseType := pgsql.ConvertSQLType(col.Type)
typeStr := baseType
// Only add size specifiers for types that support them
if col.Length > 0 && col.Precision == 0 {
if supportsLength(baseType) {
typeStr = fmt.Sprintf("%s(%d)", baseType, col.Length)
} else if isTextTypeWithoutLength(baseType) {
// Convert text with length to varchar
typeStr = fmt.Sprintf("varchar(%d)", col.Length)
}
// For types that don't support length (integer, bigint, etc.), ignore the length
} else if col.Precision > 0 {
if supportsPrecision(baseType) {
if col.Scale > 0 {
typeStr = fmt.Sprintf("%s(%d,%d)", baseType, col.Precision, col.Scale)
} else {
typeStr = fmt.Sprintf("%s(%d)", baseType, col.Precision)
}
}
// For types that don't support precision, ignore it
}
parts = append(parts, typeStr)
parts = append(parts, effectiveColumnSQLType(col))
// NOT NULL
if col.NotNull {
@@ -524,15 +534,7 @@ func (w *Writer) generateColumnDefinition(col *models.Column) string {
if col.Default != nil {
switch v := col.Default.(type) {
case string:
// Strip backticks - DBML uses them for SQL expressions but PostgreSQL doesn't
cleanDefault := stripBackticks(v)
if strings.HasPrefix(cleanDefault, "nextval") || strings.HasPrefix(cleanDefault, "CURRENT_") || strings.Contains(cleanDefault, "()") {
parts = append(parts, fmt.Sprintf("DEFAULT %s", cleanDefault))
} else if cleanDefault == "true" || cleanDefault == "false" {
parts = append(parts, fmt.Sprintf("DEFAULT %s", cleanDefault))
} else {
parts = append(parts, fmt.Sprintf("DEFAULT '%s'", escapeQuote(cleanDefault)))
}
parts = append(parts, fmt.Sprintf("DEFAULT %s", writers.QuoteDefaultValue(stripBackticks(v), col.Type)))
case bool:
parts = append(parts, fmt.Sprintf("DEFAULT %v", v))
default:
@@ -543,6 +545,64 @@ func (w *Writer) generateColumnDefinition(col *models.Column) string {
return strings.Join(parts, " ")
}
// effectiveColumnSQLType resolves the PostgreSQL type string for a column,
// appending length or precision/scale modifiers when the base type accepts
// them and the converted type does not already carry an explicit modifier.
// Returns "" for a nil column.
func effectiveColumnSQLType(col *models.Column) string {
	if col == nil {
		return ""
	}
	base := pgsql.ConvertSQLType(col.Type)
	// A type that already carries a modifier (e.g. "varchar(50)") is final.
	if pgsql.HasExplicitTypeModifier(base) {
		return base
	}
	switch {
	case col.Length > 0 && col.Precision == 0:
		if pgsql.SupportsLength(base) {
			return fmt.Sprintf("%s(%d)", base, col.Length)
		}
		if isTextTypeWithoutLength(base) {
			// Bare "text" with a length is emitted as varchar(n).
			return fmt.Sprintf("varchar(%d)", col.Length)
		}
	case col.Precision > 0:
		if pgsql.SupportsPrecision(base) {
			if col.Scale > 0 {
				return fmt.Sprintf("%s(%d,%d)", base, col.Precision, col.Scale)
			}
			return fmt.Sprintf("%s(%d)", base, col.Precision)
		}
	}
	// Length/precision ignored for types that do not support them.
	return base
}
// effectiveAlterColumnSQLType returns the SQL type to use in an
// ALTER COLUMN ... TYPE statement. Serial pseudo-types are not valid there,
// so they are replaced by their underlying storage types.
func effectiveAlterColumnSQLType(col *models.Column) string {
	typeStr := effectiveColumnSQLType(col)
	serialStorage := map[string]string{
		"smallserial": "smallint",
		"serial":      "integer",
		"bigserial":   "bigint",
	}
	if storage, ok := serialStorage[strings.ToLower(strings.TrimSpace(typeStr))]; ok {
		return storage
	}
	return typeStr
}
// buildAlterColumnUsingExpression builds the USING clause expression for an
// ALTER COLUMN ... TYPE statement, casting the existing value to the target
// type (e.g. `id::bigint`). Returns "" when either input is blank.
func buildAlterColumnUsingExpression(columnName, targetType string) string {
	name := strings.TrimSpace(columnName)
	target := strings.TrimSpace(targetType)
	if name == "" || target == "" {
		return ""
	}
	// Note: the original (untrimmed) arguments are used in the output.
	return fmt.Sprintf("%s::%s", quoteIdent(columnName), targetType)
}
// equivalentTypeListSQL renders the accepted spellings of sqlType as a
// comma-separated list of single-quoted SQL string literals, suitable for
// embedding inside an ARRAY[...] comparison in the guarded alter template.
func equivalentTypeListSQL(sqlType string) string {
	variants := pgsql.EquivalentSQLTypeVariants(sqlType)
	quoted := make([]string, len(variants))
	for i, variant := range variants {
		quoted[i] = "'" + escapeQuote(variant) + "'"
	}
	return strings.Join(quoted, ", ")
}
// WriteSchema writes a single schema and all its tables
func (w *Writer) WriteSchema(schema *models.Schema) error {
if w.writer == nil {
@@ -554,6 +614,10 @@ func (w *Writer) WriteSchema(schema *models.Schema) error {
return err
}
if err := w.writeRequiredExtensions(schema); err != nil {
return err
}
// Phase 2: Create sequences (priority 80)
if err := w.writeSequences(schema); err != nil {
return err
@@ -569,6 +633,10 @@ func (w *Writer) WriteSchema(schema *models.Schema) error {
return err
}
if err := w.writeAlterColumnTypes(schema); err != nil {
return err
}
// Phase 4: Create primary keys (priority 160)
if err := w.writePrimaryKeys(schema); err != nil {
return err
@@ -669,6 +737,16 @@ func (w *Writer) writeCreateSchema(schema *models.Schema) error {
return nil
}
// writeRequiredExtensions emits CREATE EXTENSION statements needed by the
// schema. Currently only pg_trgm, which is required when any GIN index
// resolves to the gin_trgm_ops operator class.
func (w *Writer) writeRequiredExtensions(schema *models.Schema) error {
	if schemaRequiresPGTrgm(schema) {
		fmt.Fprintln(w.writer, "CREATE EXTENSION IF NOT EXISTS pg_trgm;")
		fmt.Fprintln(w.writer)
	}
	return nil
}
// writeSequences generates CREATE SEQUENCE statements for identity columns
func (w *Writer) writeSequences(schema *models.Schema) error {
fmt.Fprintf(w.writer, "-- Sequences for schema: %s\n", schema.Name)
@@ -762,6 +840,21 @@ func (w *Writer) writeAddColumns(schema *models.Schema) error {
return nil
}
// writeAlterColumnTypes emits the guarded ALTER COLUMN ... TYPE statements for
// a schema. GenerateAlterColumnTypeStatements prepends its own header comment
// as element 0; since this method prints a header itself, that element is
// dropped before writing.
func (w *Writer) writeAlterColumnTypes(schema *models.Schema) error {
	fmt.Fprintf(w.writer, "-- Alter column types for schema: %s\n", schema.Name)
	statements, err := w.GenerateAlterColumnTypeStatements(schema)
	if err != nil {
		return err
	}
	// Robustness fix: statements[1:] panics on an empty slice; guard the
	// header skip so a future change to the generator cannot crash here.
	if len(statements) > 0 {
		statements = statements[1:]
	}
	for _, stmt := range statements {
		fmt.Fprint(w.writer, stmt)
		fmt.Fprint(w.writer, "\n")
	}
	return nil
}
// writePrimaryKeys generates ALTER TABLE statements for primary keys
func (w *Writer) writePrimaryKeys(schema *models.Schema) error {
fmt.Fprintf(w.writer, "-- Primary keys for schema: %s\n", schema.Name)
@@ -815,6 +908,7 @@ func (w *Writer) writePrimaryKeys(schema *models.Schema) error {
ConstraintName: pkName,
AutoGenNames: formatStringList(autoGenPKNames),
Columns: strings.Join(columnNames, ", "),
ColumnNames: formatStringList(columnNames),
}
sql, err := w.executor.ExecuteCreatePrimaryKeyWithAutoGenCheck(data)
@@ -862,16 +956,14 @@ func (w *Writer) writeIndexes(schema *models.Schema) error {
// Build column list with operator class support for GIN indexes
columnExprs := make([]string, 0, len(index.Columns))
for _, colName := range index.Columns {
if col, ok := table.Columns[colName]; ok {
if col, ok := resolveIndexColumn(table, colName); ok {
colExpr := col.SQLName()
// For GIN indexes on text columns, add operator class
if strings.EqualFold(index.Type, "gin") && isTextType(col.Type) {
opClass := extractOperatorClass(index.Comment)
if opClass == "" {
opClass = "gin_trgm_ops"
}
if strings.EqualFold(index.Type, "gin") {
opClass := ginOperatorClassForColumn(col, index.Comment)
if opClass != "" {
colExpr = fmt.Sprintf("%s %s", col.SQLName(), opClass)
}
}
columnExprs = append(columnExprs, colExpr)
}
}
@@ -1257,46 +1349,128 @@ func isIntegerType(colType string) bool {
}
// isTextType checks if a column type is a scalar text type (used when picking
// a GIN index operator class). Array types such as "text[]" are NOT text
// types: a GIN index on an array column must use array_ops, never
// gin_trgm_ops.
func isTextType(colType string) bool {
	lowerType := strings.ToLower(colType)
	// Bug fix: "text[]" previously matched via the "text" prefix and was
	// incorrectly given a trigram operator class. Exclude arrays up front.
	if strings.HasSuffix(lowerType, "[]") {
		return false
	}
	textTypes := []string{"text", "varchar", "character varying", "char", "character", "string"}
	for _, t := range textTypes {
		if strings.HasPrefix(lowerType, t) {
			return true
		}
	}
	return false
}
// supportsLength reports whether a PostgreSQL type accepts a length modifier,
// e.g. varchar(50). It matches both bare type names and names that already
// carry a modifier ("char(5)").
func supportsLength(colType string) bool {
	lower := strings.ToLower(colType)
	for _, candidate := range []string{"varchar", "character varying", "char", "character", "bit", "bit varying", "varbit"} {
		if lower == candidate || strings.HasPrefix(lower, candidate+"(") {
			return true
		}
	}
	return false
}
// supportsPrecision reports whether a PostgreSQL type accepts precision (and
// optionally scale) modifiers, e.g. numeric(10,2) or timestamp(3). It matches
// both bare names and names already carrying a modifier.
func supportsPrecision(colType string) bool {
	lower := strings.ToLower(colType)
	precisionTypes := []string{
		"numeric", "decimal",
		"time", "timestamp", "timestamptz",
		"timestamp with time zone", "timestamp without time zone",
		"time with time zone", "time without time zone",
		"interval",
	}
	for _, candidate := range precisionTypes {
		if lower == candidate || strings.HasPrefix(lower, candidate+"(") {
			return true
		}
	}
	return false
}
// func isTextType(colType string) bool {
// textTypes := []string{"text", "varchar", "character varying", "char", "character", "string"}
// lowerType := strings.ToLower(colType)
// if strings.HasSuffix(lowerType, "[]") {
// return false
// }
// for _, t := range textTypes {
// if strings.HasPrefix(lowerType, t) {
// return true
// }
// }
// return false
// }
// isTextTypeWithoutLength reports whether colType is the bare "text" type,
// which is converted to varchar(n) when a length is specified.
func isTextTypeWithoutLength(colType string) bool {
	return strings.EqualFold("text", colType)
}
// ginOperatorClassForColumn picks the operator class for a GIN index on col,
// optionally honoring a class requested in the index comment. Precedence
// matters:
//  1. a requested operator class wins, but only if compatible with the
//     column's type;
//  2. array columns always use array_ops;
//  3. text-like columns default to gin_trgm_ops (requires pg_trgm);
//  4. jsonb columns default to jsonb_ops;
//  5. otherwise the requested class is returned as-is (possibly "", which
//     callers treat as "no operator class").
func ginOperatorClassForColumn(col *models.Column, comment string) string {
	if col == nil {
		return ""
	}
	sqlType := effectiveColumnSQLType(col)
	baseType := pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(sqlType))
	isArray := pgsql.IsArrayType(sqlType)
	// Operator class optionally requested via the index comment.
	requested := extractOperatorClass(comment)
	if requested != "" && ginOperatorClassCompatible(baseType, isArray, requested) {
		return requested
	}
	if isArray {
		return "array_ops"
	}
	switch {
	case isTextGinBaseType(baseType):
		return "gin_trgm_ops"
	case baseType == "jsonb":
		return "jsonb_ops"
	default:
		// Incompatible (or empty) request falls through unchanged.
		return requested
	}
}
// ginOperatorClassCompatible reports whether opClass can legally be applied
// to a column whose canonical base type is baseType; isArray indicates an
// array column. Operator classes this code does not know about are assumed
// compatible.
func ginOperatorClassCompatible(baseType string, isArray bool, opClass string) bool {
	switch opClass {
	case "gin_trgm_ops", "gin_bigm_ops":
		// Trigram/bigram classes apply to scalar text-like columns only.
		return isTextGinBaseType(baseType) && !isArray
	case "jsonb_ops", "jsonb_path_ops":
		return baseType == "jsonb" && !isArray
	case "array_ops":
		return isArray
	}
	return true
}

// isTextGinBaseType reports whether baseType is a text-like base type that
// can take a trigram (gin_trgm_ops) operator class.
func isTextGinBaseType(baseType string) bool {
	textBases := []string{"text", "varchar", "character varying", "char", "character", "string", "citext", "bpchar"}
	for _, candidate := range textBases {
		if baseType == candidate {
			return true
		}
	}
	return false
}
// schemaRequiresPGTrgm reports whether any GIN index in the schema resolves
// to the gin_trgm_ops operator class, in which case the pg_trgm extension
// must be created before the indexes.
func schemaRequiresPGTrgm(schema *models.Schema) bool {
	if schema == nil {
		return false
	}
	for _, tbl := range schema.Tables {
		if tbl == nil {
			continue
		}
		for _, idx := range tbl.Indexes {
			if idx == nil || !strings.EqualFold(idx.Type, "gin") {
				continue
			}
			for _, name := range idx.Columns {
				col, ok := resolveIndexColumn(tbl, name)
				if ok && col != nil && ginOperatorClassForColumn(col, idx.Comment) == "gin_trgm_ops" {
					return true
				}
			}
		}
	}
	return false
}
// resolveIndexColumn finds the column an index entry refers to. Index column
// names may be quoted (`"name"`) or differ in case from the map key, so when
// the exact map lookup misses we fall back to a case-insensitive,
// quote-stripped comparison against the map key, the column's Name, and its
// SQLName.
func resolveIndexColumn(table *models.Table, colName string) (*models.Column, bool) {
	if table == nil {
		return nil, false
	}
	if col, ok := table.Columns[colName]; ok && col != nil {
		return col, true
	}
	canon := func(s string) string {
		return strings.ToLower(strings.Trim(s, `"`))
	}
	want := canon(colName)
	for key, col := range table.Columns {
		if col == nil {
			continue
		}
		if canon(key) == want || canon(col.Name) == want || canon(col.SQLName()) == want {
			return col, true
		}
	}
	return nil, false
}
// formatStringList formats a list of strings as a SQL-safe comma-separated quoted list
func formatStringList(items []string) string {
quoted := make([]string, len(items))
@@ -1376,7 +1550,7 @@ func (w *Writer) executeDatabaseSQL(db *models.Database, connString string) erro
// Connect to database
ctx := context.Background()
conn, err := pgx.Connect(ctx, connString)
conn, err := pgsql.Connect(ctx, connString, "writer-pgsql")
if err != nil {
return fmt.Errorf("failed to connect to database: %w", err)
}

View File

@@ -87,6 +87,117 @@ func TestWriteDatabase(t *testing.T) {
}
}
func TestWriteDatabase_GinIndexOnTextArrayDoesNotUseTrigramOperatorClass(t *testing.T) {
db := models.InitDatabase("testdb")
schema := models.InitSchema("public")
table := models.InitTable("plans", "public")
tagsCol := models.InitColumn("tags", "plans", "public")
tagsCol.Type = "text[]"
table.Columns["tags"] = tagsCol
index := &models.Index{
Name: "idx_plans_tags",
Type: "gin",
Columns: []string{"tags"},
}
table.Indexes[index.Name] = index
schema.Tables = append(schema.Tables, table)
db.Schemas = append(db.Schemas, schema)
var buf bytes.Buffer
writer := NewWriter(&writers.WriterOptions{})
writer.writer = &buf
if err := writer.WriteDatabase(db); err != nil {
t.Fatalf("WriteDatabase failed: %v", err)
}
output := buf.String()
if !strings.Contains(output, `USING gin (tags array_ops)`) {
t.Fatalf("expected GIN index on array column with array_ops, got:\n%s", output)
}
if strings.Contains(output, "gin_trgm_ops") {
t.Fatalf("did not expect gin_trgm_ops for text[] column, got:\n%s", output)
}
}
func TestWriteDatabase_GinIndexOnQuotedTextColumnUsesTrigramOperatorClass(t *testing.T) {
db := models.InitDatabase("testdb")
schema := models.InitSchema("public")
table := models.InitTable("agent_personas", "public")
nameCol := models.InitColumn("name", "agent_personas", "public")
nameCol.Type = "text"
table.Columns["name"] = nameCol
index := &models.Index{
Name: "idx_agent_personas_name_gin",
Type: "gin",
Columns: []string{`"name"`},
}
table.Indexes[index.Name] = index
schema.Tables = append(schema.Tables, table)
db.Schemas = append(db.Schemas, schema)
var buf bytes.Buffer
writer := NewWriter(&writers.WriterOptions{})
writer.writer = &buf
if err := writer.WriteDatabase(db); err != nil {
t.Fatalf("WriteDatabase failed: %v", err)
}
output := buf.String()
if !strings.Contains(output, `CREATE EXTENSION IF NOT EXISTS pg_trgm`) {
t.Fatalf("expected trigram extension for text GIN index, got:\n%s", output)
}
if !strings.Contains(output, `USING gin (name gin_trgm_ops)`) {
t.Fatalf("expected quoted text GIN index to include gin_trgm_ops, got:\n%s", output)
}
}
func TestWriteDatabase_GinIndexOnJSONBUsesJSONBOperatorClass(t *testing.T) {
db := models.InitDatabase("testdb")
schema := models.InitSchema("public")
table := models.InitTable("learnings", "public")
detailsCol := models.InitColumn("details", "learnings", "public")
detailsCol.Type = "jsonb"
table.Columns["details"] = detailsCol
index := &models.Index{
Name: "idx_learnings_details",
Type: "gin",
Columns: []string{"details"},
}
table.Indexes[index.Name] = index
schema.Tables = append(schema.Tables, table)
db.Schemas = append(db.Schemas, schema)
var buf bytes.Buffer
writer := NewWriter(&writers.WriterOptions{})
writer.writer = &buf
if err := writer.WriteDatabase(db); err != nil {
t.Fatalf("WriteDatabase failed: %v", err)
}
output := buf.String()
if !strings.Contains(output, `USING gin (details jsonb_ops)`) {
t.Fatalf("expected GIN jsonb index to include jsonb_ops, got:\n%s", output)
}
if strings.Contains(output, "gin_trgm_ops") {
t.Fatalf("did not expect gin_trgm_ops for jsonb column, got:\n%s", output)
}
}
func TestWriteForeignKeys(t *testing.T) {
// Create a test database with two related tables
db := models.InitDatabase("testdb")
@@ -636,9 +747,14 @@ func TestPrimaryKeyExistenceCheck(t *testing.T) {
t.Errorf("Output missing logic to drop auto-generated primary key\nFull output:\n%s", output)
}
// Verify it checks for our specific named constraint before adding it
if !strings.Contains(output, "constraint_name = 'pk_public_products'") {
t.Errorf("Output missing check for our named primary key constraint\nFull output:\n%s", output)
// Verify it compares the current primary key columns before dropping/recreating
if !strings.Contains(output, "current_pk_matches") || !strings.Contains(output, "ARRAY['id']") {
t.Errorf("Output missing safe primary key comparison logic\nFull output:\n%s", output)
}
// Verify it only adds the desired key when no PK exists or an auto-generated mismatch was dropped
if !strings.Contains(output, "current_pk_name IS NULL") || !strings.Contains(output, "current_pk_name IN ('products_pkey', 'public_products_pkey')") {
t.Errorf("Output missing guarded primary key creation logic\nFull output:\n%s", output)
}
}
@@ -729,6 +845,93 @@ func TestColumnSizeSpecifiers(t *testing.T) {
}
}
func TestWriteDatabase_PrimaryKeyTemplateDoesNotDropMatchingAutoPrimaryKey(t *testing.T) {
db := models.InitDatabase("testdb")
schema := models.InitSchema("public")
table := models.InitTable("learnings", "public")
idCol := models.InitColumn("id", "learnings", "public")
idCol.Type = "bigint"
idCol.IsPrimaryKey = true
table.Columns["id"] = idCol
parentCol := models.InitColumn("duplicate_of_learning_id", "learnings", "public")
parentCol.Type = "bigint"
table.Columns["duplicate_of_learning_id"] = parentCol
schema.Tables = append(schema.Tables, table)
db.Schemas = append(db.Schemas, schema)
var buf bytes.Buffer
writer := NewWriter(&writers.WriterOptions{})
writer.writer = &buf
if err := writer.WriteDatabase(db); err != nil {
t.Fatalf("WriteDatabase failed: %v", err)
}
output := buf.String()
if !strings.Contains(output, "current_pk_matches") {
t.Fatalf("expected generated SQL to compare current PK columns, got:\n%s", output)
}
if !strings.Contains(output, "ARRAY['id']") {
t.Fatalf("expected generated SQL to compare against desired PK columns, got:\n%s", output)
}
if !strings.Contains(output, "NOT current_pk_matches") {
t.Fatalf("expected generated SQL to avoid dropping matching PKs, got:\n%s", output)
}
}
func TestGenerateColumnDefinition_PreservesExplicitTypeModifiers(t *testing.T) {
writer := NewWriter(&writers.WriterOptions{})
cases := []struct {
name string
colType string
length int
precision int
scale int
wantType string
}{
{
name: "character varying already includes length",
colType: "character varying(50)",
length: 50,
wantType: "character varying(50)",
},
{
name: "numeric already includes precision",
colType: "numeric(10,2)",
precision: 10,
scale: 2,
wantType: "numeric(10,2)",
},
{
name: "custom vector modifier preserved",
colType: "vector(1536)",
wantType: "vector(1536)",
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
col := models.InitColumn("sample", "events", "public")
col.Type = tc.colType
col.Length = tc.length
col.Precision = tc.precision
col.Scale = tc.scale
def := writer.generateColumnDefinition(col)
if !strings.Contains(def, " "+tc.wantType+" ") && !strings.HasSuffix(def, " "+tc.wantType) {
t.Fatalf("generated definition %q does not contain expected type %q", def, tc.wantType)
}
if strings.Contains(def, ")(") {
t.Fatalf("generated definition %q appears to duplicate modifiers", def)
}
})
}
}
func TestGenerateAddColumnStatements(t *testing.T) {
// Create a test database with tables that have new columns
db := models.InitDatabase("testdb")
@@ -855,3 +1058,82 @@ func TestWriteAddColumnStatements(t *testing.T) {
t.Errorf("Output missing DO block\nFull output:\n%s", output)
}
}
func TestWriteSchema_EmitsGuardedAlterColumnTypeStatements(t *testing.T) {
db := models.InitDatabase("testdb")
schema := models.InitSchema("public")
table := models.InitTable("agent_skills", "public")
nameCol := models.InitColumn("name", "agent_skills", "public")
nameCol.Type = "character varying"
nameCol.Length = 255
table.Columns["name"] = nameCol
tagsCol := models.InitColumn("tags", "agent_skills", "public")
tagsCol.Type = "text[]"
table.Columns["tags"] = tagsCol
schema.Tables = append(schema.Tables, table)
db.Schemas = append(db.Schemas, schema)
var buf bytes.Buffer
writer := NewWriter(&writers.WriterOptions{})
writer.writer = &buf
if err := writer.WriteDatabase(db); err != nil {
t.Fatalf("WriteDatabase failed: %v", err)
}
output := buf.String()
if !strings.Contains(output, "-- Alter column types for schema: public") {
t.Fatalf("expected alter column type section, got:\n%s", output)
}
if !strings.Contains(output, "pg_catalog.format_type") {
t.Fatalf("expected guarded live-type check, got:\n%s", output)
}
if !strings.Contains(output, "ALTER COLUMN name TYPE character varying(255)") {
t.Fatalf("expected guarded alter for character varying(255), got:\n%s", output)
}
if !strings.Contains(output, "ARRAY['varchar(255)', 'character varying(255)']") {
t.Fatalf("expected equivalent type spellings for varchar guard, got:\n%s", output)
}
if !strings.Contains(output, "ALTER COLUMN tags TYPE text[]") {
t.Fatalf("expected guarded alter for array type, got:\n%s", output)
}
if !strings.Contains(output, `ALTER COLUMN tags TYPE text[] USING tags::text[];`) {
t.Fatalf("expected guarded alter for array type to include USING cast, got:\n%s", output)
}
}
func TestWriteSchema_UsesStorageTypeForSerialAlterStatements(t *testing.T) {
db := models.InitDatabase("testdb")
schema := models.InitSchema("public")
table := models.InitTable("learnings", "public")
idCol := models.InitColumn("id", "learnings", "public")
idCol.Type = "bigserial"
table.Columns["id"] = idCol
schema.Tables = append(schema.Tables, table)
db.Schemas = append(db.Schemas, schema)
var buf bytes.Buffer
writer := NewWriter(&writers.WriterOptions{})
writer.writer = &buf
if err := writer.WriteDatabase(db); err != nil {
t.Fatalf("WriteDatabase failed: %v", err)
}
output := buf.String()
if !strings.Contains(output, "ALTER COLUMN id TYPE bigint") {
t.Fatalf("expected serial alter to use bigint storage type, got:\n%s", output)
}
if strings.Contains(output, "ALTER COLUMN id TYPE bigserial;") {
t.Fatalf("did not expect invalid bigserial alter statement, got:\n%s", output)
}
if !strings.Contains(output, `ALTER COLUMN id TYPE bigint USING id::bigint;`) {
t.Fatalf("expected serial alter to include USING cast, got:\n%s", output)
}
}

View File

@@ -61,7 +61,7 @@ func (w *Writer) databaseToPrisma(db *models.Database) string {
sb.WriteString("\n")
// Write generator block
sb.WriteString(w.generateGenerator())
sb.WriteString(w.generateGenerator(db))
sb.WriteString("\n")
// Process all schemas (typically just one in Prisma)
@@ -114,13 +114,28 @@ func (w *Writer) generateDatasource(db *models.Database) string {
}
// generateGenerator generates the generator block
func (w *Writer) generateGenerator() string {
func (w *Writer) generateGenerator(db *models.Database) string {
if w.usePrisma7Generator(db) {
return `generator client {
provider = "prisma-client"
output = "./generated"
}
`
}
return `generator client {
provider = "prisma-client-js"
}
`
}
// usePrisma7Generator reports whether Prisma 7 generator output should be
// emitted: either the writer option requested it explicitly, or the database
// was parsed from a prisma7-format source.
func (w *Writer) usePrisma7Generator(db *models.Database) bool {
	optedIn := w.options != nil && w.options.Prisma7
	fromSource := db != nil && db.SourceFormat == "prisma7"
	return optedIn || fromSource
}
// enumToPrisma converts an Enum to Prisma enum block
func (w *Writer) enumToPrisma(enum *models.Enum) string {
var sb strings.Builder

View File

@@ -0,0 +1,52 @@
package prisma
import (
"strings"
"testing"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
)
// TestGenerateGenerator_DefaultsToPrismaClientJS verifies that without any
// Prisma 7 opt-in (no writer flag, no prisma7 source format) the classic
// prisma-client-js generator block is emitted without an output path.
func TestGenerateGenerator_DefaultsToPrismaClientJS(t *testing.T) {
	t.Parallel()
	writer := NewWriter(&writers.WriterOptions{})
	db := models.InitDatabase("testdb")
	got := writer.generateGenerator(db)
	if !strings.Contains(got, `provider = "prisma-client-js"`) {
		t.Fatalf("expected prisma-client-js generator, got:\n%s", got)
	}
	if strings.Contains(got, `output = "./generated"`) {
		t.Fatalf("did not expect prisma7 output path in default generator:\n%s", got)
	}
}
// TestGenerateGenerator_Prisma7FlagUsesPrismaClient verifies that the Prisma7
// writer option switches the generator block to the new "prisma-client"
// provider and adds the ./generated output path.
func TestGenerateGenerator_Prisma7FlagUsesPrismaClient(t *testing.T) {
	t.Parallel()
	writer := NewWriter(&writers.WriterOptions{Prisma7: true})
	db := models.InitDatabase("testdb")
	got := writer.generateGenerator(db)
	if !strings.Contains(got, `provider = "prisma-client"`) {
		t.Fatalf("expected prisma-client generator, got:\n%s", got)
	}
	if !strings.Contains(got, `output = "./generated"`) {
		t.Fatalf("expected prisma7 output path, got:\n%s", got)
	}
}
// TestGenerateGenerator_Prisma7SourceFormatUsesPrismaClient verifies that a
// database whose SourceFormat is "prisma7" selects the new "prisma-client"
// provider even when the writer option is not set.
func TestGenerateGenerator_Prisma7SourceFormatUsesPrismaClient(t *testing.T) {
	t.Parallel()
	writer := NewWriter(&writers.WriterOptions{})
	db := models.InitDatabase("testdb")
	db.SourceFormat = "prisma7"
	got := writer.generateGenerator(db)
	if !strings.Contains(got, `provider = "prisma-client"`) {
		t.Fatalf("expected prisma-client generator from source format, got:\n%s", got)
	}
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/jackc/pgx/v5"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
)
@@ -42,7 +43,7 @@ func (w *Writer) WriteDatabase(db *models.Database) error {
// Connect to database
ctx := context.Background()
conn, err := pgx.Connect(ctx, connString)
conn, err := pgsql.Connect(ctx, connString, "writer-sqlexec")
if err != nil {
return fmt.Errorf("failed to connect to database: %w", err)
}
@@ -72,7 +73,7 @@ func (w *Writer) WriteSchema(schema *models.Schema) error {
// Connect to database
ctx := context.Background()
conn, err := pgx.Connect(ctx, connString)
conn, err := pgsql.Connect(ctx, connString, "writer-sqlexec")
if err != nil {
return fmt.Errorf("failed to connect to database: %w", err)
}

View File

@@ -20,6 +20,18 @@ type Writer interface {
WriteTable(table *models.Table) error
}
// NullableType constants control which Go package is used for nullable column types
// in code-generation writers (Bun, GORM).
const (
// NullableTypeResolveSpec uses github.com/bitechdev/ResolveSpec/pkg/spectypes
// (SqlString, SqlInt32, SqlVector, SqlStringArray, …). This is the default.
NullableTypeResolveSpec = "resolvespec"
// NullableTypeStdlib uses the standard library database/sql nullable types
// (sql.NullString, sql.NullInt32, …) and plain Go slices for arrays.
NullableTypeStdlib = "stdlib"
)
// WriterOptions contains common options for writers
type WriterOptions struct {
// OutputPath is the path where the output should be written
@@ -33,6 +45,15 @@ type WriterOptions struct {
// Useful for databases like SQLite that do not support schemas.
FlattenSchema bool
// NullableTypes selects the Go type package used for nullable columns in
// code-generation writers (bun, gorm). Accepted values:
// "resolvespec" (default) — github.com/bitechdev/ResolveSpec/pkg/spectypes
// "stdlib" — database/sql (sql.NullString, sql.NullInt32, …)
NullableTypes string
// Prisma7 enables Prisma 7-specific output for Prisma writers.
Prisma7 bool
// Additional options can be added here as needed
Metadata map[string]interface{}
}
@@ -92,8 +113,12 @@ func SanitizeFilename(name string) string {
// Examples (bigint): "0" → "0"
// Examples (timestamp): "now()" → "now()" (function call never quoted)
func QuoteDefaultValue(value, sqlType string) string {
value = strings.TrimSpace(value)
// Function calls are never quoted regardless of column type.
if strings.Contains(value, "(") || strings.Contains(value, ")") {
if strings.Contains(value, "(") || strings.Contains(value, ")") ||
strings.Contains(value, "::") ||
strings.HasPrefix(strings.ToUpper(value), "ARRAY[") {
return value
}
@@ -103,6 +128,16 @@ func QuoteDefaultValue(value, sqlType string) string {
baseType = baseType[:idx]
}
if isArraySQLType(baseType) {
if arrayLiteral, ok := normalizeArrayDefaultLiteral(value); ok {
return quoteSQLLiteral(arrayLiteral)
}
}
if isQuotedSQLLiteral(value) {
return value
}
// Types whose default values must NOT be quoted.
unquotedTypes := map[string]bool{
// Integer types
@@ -136,7 +171,32 @@ func QuoteDefaultValue(value, sqlType string) string {
// Everything else (text, varchar, char, uuid, date, time, timestamp, json, …)
// is treated as a quoted literal.
return "'" + value + "'"
return quoteSQLLiteral(value)
}
// isArraySQLType reports whether sqlType denotes a PostgreSQL array type,
// identified by the trailing "[]" suffix (e.g. "text[]", "integer[]").
func isArraySQLType(sqlType string) bool {
	const pgArraySuffix = "[]"
	return strings.HasSuffix(sqlType, pgArraySuffix)
}
// normalizeArrayDefaultLiteral strips redundant outer quoting from a
// PostgreSQL array default literal and returns the bare "{...}" form.
// Accepted input shapes, tried in order:
//   - "''{...}''" (doubly quoted)  → "{...}"
//   - "'{...}'"   (singly quoted)  → "{...}"
//   - "{...}"     (bare)           → unchanged
// The boolean result reports whether the input matched one of those
// shapes; anything else yields ("", false).
func normalizeArrayDefaultLiteral(value string) (string, bool) {
	if strings.HasPrefix(value, "''{") && strings.HasSuffix(value, "}''") {
		return value[2 : len(value)-2], true
	}
	if strings.HasPrefix(value, "'{") && strings.HasSuffix(value, "}'") {
		return value[1 : len(value)-1], true
	}
	if strings.HasPrefix(value, "{") && strings.HasSuffix(value, "}") {
		return value, true
	}
	return "", false
}
// isQuotedSQLLiteral reports whether value is already wrapped in single
// quotes (a complete SQL string literal such as "'active'"). A lone "'"
// does not qualify: the value must be at least two characters long so the
// opening and closing quotes are distinct.
func isQuotedSQLLiteral(value string) bool {
	if len(value) < 2 {
		return false
	}
	return value[0] == '\'' && value[len(value)-1] == '\''
}
// quoteSQLLiteral wraps value in single quotes, escaping each embedded
// single quote by doubling it per SQL convention ("it's" → "'it''s'").
func quoteSQLLiteral(value string) string {
	var b strings.Builder
	b.Grow(len(value) + 2)
	b.WriteByte('\'')
	for i := 0; i < len(value); i++ {
		if value[i] == '\'' {
			b.WriteString("''")
		} else {
			b.WriteByte(value[i])
		}
	}
	b.WriteByte('\'')
	return b.String()
}
// SanitizeStructTagValue sanitizes a value to be safely used inside Go struct tags.
@@ -147,7 +207,8 @@ func QuoteDefaultValue(value, sqlType string) string {
// - Returns a clean identifier safe for use in struct tags and field names
func SanitizeStructTagValue(value string) string {
// Remove DBML/DCTX style comments in brackets (e.g., [note: 'description'])
commentRegex := regexp.MustCompile(`\s*\[.*?\]\s*`)
// Require at least one character inside brackets to avoid stripping PostgreSQL array suffix "[]"
commentRegex := regexp.MustCompile(`\s*\[[^\]]+\]\s*`)
value = commentRegex.ReplaceAllString(value, "")
// Trim whitespace

View File

@@ -0,0 +1,54 @@
package writers
import "testing"
// TestQuoteDefaultValue verifies QuoteDefaultValue's handling of plain text
// defaults, PostgreSQL array literals in their various quoting shapes, and
// function-call defaults (which must pass through unmodified).
func TestQuoteDefaultValue(t *testing.T) {
	t.Parallel()

	cases := []struct {
		label   string
		input   string
		sqlType string
		expect  string
	}{
		{"text default is quoted", "active", "text", "'active'"},
		{"array default from bare literal is quoted once", "{}", "text[]", "'{}'"},
		{"array default from quoted literal is preserved", "'{}'", "text[]", "'{}'"},
		{"array default from double quoted literal is normalized", "''{}''", "text[]", "'{}'"},
		{"function default is left alone", "now()", "timestamptz", "now()"},
	}

	for _, tc := range cases {
		t.Run(tc.label, func(t *testing.T) {
			if got := QuoteDefaultValue(tc.input, tc.sqlType); got != tc.expect {
				t.Fatalf("QuoteDefaultValue(%q, %q) = %q, want %q", tc.input, tc.sqlType, got, tc.expect)
			}
		})
	}
}

View File

@@ -56,7 +56,7 @@ Table admin.audit_logs {
}
// Relationships
Ref: public.posts.user_id > public.users.id [ondelete: CASCADE, onupdate: CASCADE]
Ref: public.comments.post_id > public.posts.id [ondelete: CASCADE]
Ref: public.comments.user_id > public.users.id [ondelete: SET NULL]
Ref: admin.audit_logs.user_id > public.users.id [ondelete: SET NULL]
Ref: public.posts.user_id > public.users.id [delete: CASCADE, update: CASCADE]
Ref: public.comments.post_id > public.posts.id [delete: CASCADE]
Ref: public.comments.user_id > public.users.id [delete: SET NULL]
Ref: admin.audit_logs.user_id > public.users.id [delete: SET NULL]