9 Commits

9f29bc112e  Release version  (2025-12-28 15:12:02 +02:00)
Some checks failed:
- CI / Test (1.24) (push): successful
- CI / Test (1.25) (push): successful
- CI / Build (push): successful
- CI / Lint (push): successful
- Integration Tests / Integration Tests (push): failing
- Release / Build and Release (push): successful

b55737ab4c  Fixed linting issues  (2025-12-28 14:51:19 +02:00)
Some checks failed:
- CI / Test (1.24) (push): successful
- CI / Test (1.25) (push): successful
- CI / Build (push): successful
- CI / Lint (push): successful
- Integration Tests / Integration Tests (push): failing

2a271b9859  Updated tests  (2025-12-28 14:35:20 +02:00)
Some checks failed:
- CI / Test (1.24) (push): successful
- CI / Test (1.25) (push): successful
- CI / Build (push): successful
- CI / Lint (push): failing
- Integration Tests / Integration Tests (push): failing

beb5b4fac8  Build/test fixes  (2025-12-28 14:21:57 +02:00)
Some checks failed:
- CI / Test (1.24) (push): failing
- CI / Test (1.25) (push): failing
- CI / Lint (push): successful
- CI / Build (push): successful
- Integration Tests / Integration Tests (push): failing

e61204cb3c  Fix lint issue  (2025-12-28 12:19:04 +02:00)
Some checks failed:
- CI / Test (1.24) (push): failing
- CI / Test (1.25) (push): failing
- CI / Build (push): successful
- CI / Lint (push): successful

d52b9cdc14  Enhanced godoc  (2025-12-28 11:42:05 +02:00)

f98b278d72  Added Graphql  (2025-12-28 11:41:55 +02:00)

666eab7cec  Updated Readme files  (2025-12-28 10:34:20 +02:00)
Some checks failed:
- CI / Test (1.24) (push): failing
- CI / Test (1.25) (push): failing
- CI / Lint (push): failing
- CI / Build (push): successful

35bc9dfb5c  Added Drizzle ORM support  (2025-12-28 10:15:30 +02:00)
Some checks failed:
- CI / Test (1.24) (push): failing
- CI / Test (1.25) (push): failing
- CI / Lint (push): failing
- CI / Build (push): successful
58 changed files with 8280 additions and 162 deletions

View File

@@ -34,8 +34,8 @@ jobs:
       - name: Download dependencies
         run: go mod download
-      - name: Run tests
-        run: go test -v -race -coverprofile=coverage.out -covermode=atomic ./...
+      - name: Run unit tests
+        run: make test
       - name: Upload coverage to Codecov
         uses: codecov/codecov-action@v4
@@ -55,13 +55,15 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: '1.24'
-      - name: golangci-lint
-        uses: golangci/golangci-lint-action@v9
-        with:
-          version: latest
-          args: --config=.golangci.json
+          go-version: '1.25'
+      - name: Install golangci-lint
+        run: |
+          curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin latest
+          echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
+      - name: Run linter
+        run: make lint

   build:
     name: Build
@@ -74,10 +76,22 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: '1.24'
-      - name: Build
-        run: go build -v ./cmd/relspec
+          go-version: '1.25'
+      - name: Download dependencies
+        run: go mod download
+      - name: Build binary
+        run: make build
+      - name: Verify binary exists
+        run: |
+          if [ ! -f build/relspec ]; then
+            echo "Error: Binary not found at build/relspec"
+            exit 1
+          fi
+          echo "Build successful: build/relspec"
+          ls -lh build/relspec
       - name: Check mod tidiness
         run: |

.github/workflows/integration-tests.yml (new file, +91 lines)

@@ -0,0 +1,91 @@
name: Integration Tests

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  integration-tests:
    name: Integration Tests
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.25'
      - name: Cache Go modules
        uses: actions/cache@v4
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
      - name: Download dependencies
        run: go mod download
      - name: Start PostgreSQL container
        run: |
          docker run -d \
            --name relspec-test-postgres \
            --network host \
            -e POSTGRES_USER=relspec \
            -e POSTGRES_PASSWORD=relspec_test_password \
            -e POSTGRES_DB=relspec_test \
            postgres:16-alpine
      - name: Wait for PostgreSQL to be ready
        run: |
          echo "Waiting for PostgreSQL to start..."
          for i in {1..30}; do
            if docker exec relspec-test-postgres pg_isready -U relspec -d relspec_test > /dev/null 2>&1; then
              echo "PostgreSQL is ready!"
              break
            fi
            echo "Waiting... ($i/30)"
            sleep 1
          done
          sleep 2
      - name: Copy init script into container
        run: |
          docker cp tests/postgres/init.sql relspec-test-postgres:/tmp/init.sql
      - name: Initialize test database
        run: |
          docker exec relspec-test-postgres psql -U relspec -d relspec_test -f /tmp/init.sql
      - name: Verify database setup
        run: |
          echo "Verifying database initialization..."
          docker exec relspec-test-postgres psql -U relspec -d relspec_test -c "
            SELECT
              (SELECT COUNT(*) FROM pg_namespace WHERE nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast') AND nspname NOT LIKE 'pg_%') as schemas,
              (SELECT COUNT(*) FROM pg_tables WHERE schemaname NOT IN ('pg_catalog', 'information_schema')) as tables,
              (SELECT COUNT(*) FROM pg_views WHERE schemaname NOT IN ('pg_catalog', 'information_schema')) as views,
              (SELECT COUNT(*) FROM pg_sequences WHERE schemaname NOT IN ('pg_catalog', 'information_schema')) as sequences;
          "
      - name: Run integration tests
        env:
          RELSPEC_TEST_PG_CONN: postgres://relspec:relspec_test_password@localhost:5432/relspec_test
        run: make test-integration
      - name: Stop PostgreSQL container
        if: always()
        run: |
          docker stop relspec-test-postgres || true
          docker rm relspec-test-postgres || true
      - name: Summary
        if: always()
        run: |
          echo "Integration tests completed."
          echo "PostgreSQL container has been cleaned up."

.github/workflows/release.yml (new file, +116 lines)

@@ -0,0 +1,116 @@
name: Release

on:
  push:
    tags:
      - 'v*.*.*'

jobs:
  build-and-release:
    name: Build and Release
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.25'
      - name: Get version from tag
        id: get_version
        run: |
          echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
          echo "Version: ${GITHUB_REF#refs/tags/}"
      - name: Build binaries for multiple platforms
        run: |
          mkdir -p dist
          # Linux AMD64
          GOOS=linux GOARCH=amd64 go build -o dist/relspec-linux-amd64 -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec
          # Linux ARM64
          GOOS=linux GOARCH=arm64 go build -o dist/relspec-linux-arm64 -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec
          # macOS AMD64
          GOOS=darwin GOARCH=amd64 go build -o dist/relspec-darwin-amd64 -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec
          # macOS ARM64 (Apple Silicon)
          GOOS=darwin GOARCH=arm64 go build -o dist/relspec-darwin-arm64 -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec
          # Windows AMD64
          GOOS=windows GOARCH=amd64 go build -o dist/relspec-windows-amd64.exe -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec
          # Create checksums
          cd dist
          sha256sum * > checksums.txt
          cd ..
      - name: Generate release notes
        id: release_notes
        run: |
          # Get the previous tag
          previous_tag=$(git describe --tags --abbrev=0 HEAD^ 2>/dev/null || echo "")
          if [ -z "$previous_tag" ]; then
            # No previous tag, get all commits
            commits=$(git log --pretty=format:"- %s (%h)" --no-merges)
          else
            # Get commits since the previous tag
            commits=$(git log "${previous_tag}..HEAD" --pretty=format:"- %s (%h)" --no-merges)
          fi
          # Create release notes
          cat > release_notes.md << EOF
          # Release ${{ steps.get_version.outputs.VERSION }}

          ## Changes

          ${commits}

          ## Installation

          Download the appropriate binary for your platform:

          - **Linux (AMD64)**: \`relspec-linux-amd64\`
          - **Linux (ARM64)**: \`relspec-linux-arm64\`
          - **macOS (Intel)**: \`relspec-darwin-amd64\`
          - **macOS (Apple Silicon)**: \`relspec-darwin-arm64\`
          - **Windows (AMD64)**: \`relspec-windows-amd64.exe\`

          Make the binary executable (Linux/macOS):
          \`\`\`bash
          chmod +x relspec-*
          \`\`\`

          Verify the download with the provided checksums.
          EOF
      - name: Create Release
        uses: softprops/action-gh-release@v1
        with:
          body_path: release_notes.md
          files: |
            dist/relspec-linux-amd64
            dist/relspec-linux-arm64
            dist/relspec-darwin-amd64
            dist/relspec-darwin-arm64
            dist/relspec-windows-amd64.exe
            dist/checksums.txt
          draft: false
          prerelease: false
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Summary
        run: |
          echo "Release ${{ steps.get_version.outputs.VERSION }} created successfully!"
          echo "Binaries built for:"
          echo "  - Linux (amd64, arm64)"
          echo "  - macOS (amd64, arm64)"
          echo "  - Windows (amd64)"

AI_USE.md (new file, +35 lines)

@@ -0,0 +1,35 @@
# AI Usage Declaration
This Go project utilizes AI tools for the following purposes:
- Generating and improving documentation
- Writing and enhancing tests
- Refactoring and optimizing existing code
AI is **not** used for core design or architecture decisions.
All design decisions are deferred to human discussion.
AI is employed only for enhancements to human-written code.
We are aware that AI can hallucinate significantly; all AI-generated content is reviewed and verified by humans.
.-""""""-.
.' '.
/ O O \
: ` :
| |
: .------. :
\ ' ' /
'. .'
'-......-'
MEGAMIND AI
[============]
___________
/___________\
/_____________\
| ASSIMILATE |
| RESISTANCE |
| IS FUTILE |
\_____________/
\___________/

View File

@@ -1,4 +1,4 @@
-.PHONY: all build test lint coverage clean install help docker-up docker-down docker-test docker-test-integration
+.PHONY: all build test test-unit test-integration lint coverage clean install help docker-up docker-down docker-test docker-test-integration release release-version

 # Binary name
 BINARY_NAME=relspec
@@ -22,9 +22,23 @@ build: ## Build the binary
 	$(GOBUILD) -o $(BUILD_DIR)/$(BINARY_NAME) ./cmd/relspec
 	@echo "Build complete: $(BUILD_DIR)/$(BINARY_NAME)"

-test: ## Run tests
-	@echo "Running tests..."
-	$(GOTEST) -v -race -coverprofile=coverage.out ./...
+test: test-unit ## Run all unit tests (alias for test-unit)
+
+test-unit: ## Run unit tests (excludes integration tests)
+	@echo "Running unit tests..."
+	$(GOTEST) -v -race -coverprofile=coverage.out -covermode=atomic $$(go list ./... | grep -v '/tests/integration' | grep -v '/tests/assets' | grep -v '/pkg/readers/pgsql')
+
+test-integration: ## Run integration tests (requires RELSPEC_TEST_PG_CONN environment variable)
+	@echo "Running integration tests..."
+	@if [ -z "$$RELSPEC_TEST_PG_CONN" ]; then \
+		echo "Error: RELSPEC_TEST_PG_CONN environment variable is not set"; \
+		echo "Example: export RELSPEC_TEST_PG_CONN='postgres://relspec:relspec_test_password@localhost:5432/relspec_test'"; \
+		exit 1; \
+	fi
+	@echo "Running PostgreSQL reader tests..."
+	$(GOTEST) -v -count=1 ./pkg/readers/pgsql/
+	@echo "Running general integration tests..."
+	$(GOTEST) -v -count=1 ./tests/integration/

 coverage: test ## Run tests with coverage report
 	@echo "Generating coverage report..."
@@ -98,5 +112,55 @@ docker-test-integration: docker-up ## Start DB and run integration tests
 	$(GOTEST) -v ./pkg/readers/pgsql/ -count=1 || (make docker-down && exit 1)
 	@make docker-down

+release: ## Create and push a new release tag (auto-increments patch version)
+	@echo "Creating new release..."
+	@latest_tag=$$(git describe --tags --abbrev=0 2>/dev/null || echo ""); \
+	if [ -z "$$latest_tag" ]; then \
+		version="v1.0.0"; \
+		echo "No existing tags found. Creating first release: $$version"; \
+		commit_logs=$$(git log --pretty=format:"- %s" --no-merges); \
+	else \
+		echo "Latest tag: $$latest_tag"; \
+		version_number=$${latest_tag#v}; \
+		IFS='.' read -r major minor patch <<< "$$version_number"; \
+		patch=$$((patch + 1)); \
+		version="v$$major.$$minor.$$patch"; \
+		echo "Creating new release: $$version"; \
+		commit_logs=$$(git log "$${latest_tag}..HEAD" --pretty=format:"- %s" --no-merges); \
+	fi; \
+	if [ -z "$$commit_logs" ]; then \
+		tag_message="Release $$version"; \
+	else \
+		tag_message="Release $$version\n\n$$commit_logs"; \
+	fi; \
+	git tag -a "$$version" -m "$$tag_message"; \
+	git push origin "$$version"; \
+	echo "Tag $$version created and pushed to remote repository."
+
+release-version: ## Create and push a release with specific version (use: make release-version VERSION=v1.2.3)
+	@if [ -z "$(VERSION)" ]; then \
+		echo "Error: VERSION is required. Usage: make release-version VERSION=v1.2.3"; \
+		exit 1; \
+	fi
+	@version="$(VERSION)"; \
+	if ! echo "$$version" | grep -q "^v"; then \
+		version="v$$version"; \
+	fi; \
+	echo "Creating release: $$version"; \
+	latest_tag=$$(git describe --tags --abbrev=0 2>/dev/null || echo ""); \
+	if [ -z "$$latest_tag" ]; then \
+		commit_logs=$$(git log --pretty=format:"- %s" --no-merges); \
+	else \
+		commit_logs=$$(git log "$${latest_tag}..HEAD" --pretty=format:"- %s" --no-merges); \
+	fi; \
+	if [ -z "$$commit_logs" ]; then \
+		tag_message="Release $$version"; \
+	else \
+		tag_message="Release $$version\n\n$$commit_logs"; \
+	fi; \
+	git tag -a "$$version" -m "$$tag_message"; \
+	git push origin "$$version"; \
+	echo "Tag $$version created and pushed to remote repository."

 help: ## Display this help screen
 	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'
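
For clarity, the version bump the `release` target performs, restated as a small Go sketch (illustrative only; the Makefile does the same thing by shelling out to git and splitting the tag with IFS):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// nextPatch strips the leading "v", increments the patch component,
// and reattaches the prefix: v1.2.3 -> v1.2.4.
func nextPatch(tag string) (string, error) {
	parts := strings.Split(strings.TrimPrefix(tag, "v"), ".")
	if len(parts) != 3 {
		return "", fmt.Errorf("unexpected tag format: %q", tag)
	}
	patch, err := strconv.Atoi(parts[2])
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("v%s.%s.%d", parts[0], parts[1], patch+1), nil
}

func main() {
	v, _ := nextPatch("v1.2.3")
	fmt.Println(v) // v1.2.4
}
```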

View File

@@ -16,19 +16,53 @@ RelSpec provides bidirectional conversion and comparison between various databas
 ## Features

-### Input Formats
-
-- **XML** - Generic XML schema definitions
-- **JSON** - JSON-based schema specifications
-- **Clarion DCTX** - Clarion database dictionary format
-- **Database Inspection** - Direct database introspection
-- **GORM Models** - Read existing GORM Go structs
-- **Bun Models** - Read existing Bun Go structs
-
-### Output Formats
-
-- **GORM Models** - Generate GORM-compatible Go structs
-- **Bun Models** - Generate Bun-compatible Go structs
-- **JSON** - Standard JSON schema output
-- **YAML** - Human-readable YAML format
+### Readers (Input Formats)
+
+RelSpec can read database schemas from multiple sources:
+
+#### ORM Models
+- [GORM](pkg/readers/gorm/README.md) - Go GORM model definitions
+- [Bun](pkg/readers/bun/README.md) - Go Bun model definitions
+- [Drizzle](pkg/readers/drizzle/README.md) - TypeScript Drizzle ORM schemas
+- [Prisma](pkg/readers/prisma/README.md) - Prisma schema language
+- [TypeORM](pkg/readers/typeorm/README.md) - TypeScript TypeORM entities
+
+#### Database Inspection
+- [PostgreSQL](pkg/readers/pgsql/README.md) - Direct PostgreSQL database introspection
+
+#### Schema Formats
+- [DBML](pkg/readers/dbml/README.md) - Database Markup Language (dbdiagram.io)
+- [DCTX](pkg/readers/dctx/README.md) - Clarion database dictionary format
+- [DrawDB](pkg/readers/drawdb/README.md) - DrawDB JSON format
+- [GraphQL](pkg/readers/graphql/README.md) - GraphQL Schema Definition Language (SDL)
+- [JSON](pkg/readers/json/README.md) - RelSpec canonical JSON format
+- [YAML](pkg/readers/yaml/README.md) - RelSpec canonical YAML format
+
+### Writers (Output Formats)
+
+RelSpec can write database schemas to multiple formats:
+
+#### ORM Models
+- [GORM](pkg/writers/gorm/README.md) - Generate GORM-compatible Go structs
+- [Bun](pkg/writers/bun/README.md) - Generate Bun-compatible Go structs
+- [Drizzle](pkg/writers/drizzle/README.md) - Generate Drizzle ORM TypeScript schemas
+- [Prisma](pkg/writers/prisma/README.md) - Generate Prisma schema files
+- [TypeORM](pkg/writers/typeorm/README.md) - Generate TypeORM TypeScript entities
+
+#### Database DDL
+- [PostgreSQL](pkg/writers/pgsql/README.md) - PostgreSQL DDL (CREATE TABLE, etc.)
+
+#### Schema Formats
+- [DBML](pkg/writers/dbml/README.md) - Database Markup Language
+- [DCTX](pkg/writers/dctx/README.md) - Clarion database dictionary format
+- [DrawDB](pkg/writers/drawdb/README.md) - DrawDB JSON format
+- [GraphQL](pkg/writers/graphql/README.md) - GraphQL Schema Definition Language (SDL)
+- [JSON](pkg/writers/json/README.md) - RelSpec canonical JSON format
+- [YAML](pkg/writers/yaml/README.md) - RelSpec canonical YAML format
+
+## Use of AI
+
+[Rules and use of AI](./AI_USE.md)

 ## Installation
@@ -94,7 +128,7 @@ go test ./...

 Apache License 2.0 - See [LICENSE](LICENSE) for details.

-Copyright 2025 wdevs
+Copyright 2025 Warky Devs

 ## Contributing

View File

@@ -9,15 +9,16 @@
 - [ ] MSSQL driver
 - [✔️] Foreign key detection
 - [✔️] Index extraction
-- [ ] .sql file generation with sequence and priority
+- [*] .sql file generation with sequence and priority
 - [✔️] .dbml: Database Markup Language (DBML) for textual schema representation.
 - [✔️] Prisma schema support (PSL format) .prisma
+- [✔️] Drizzle ORM support .ts (TypeScript / JavaScript) (Mr. Edd wanted to move from Prisma to Drizzle. If you find bugs, you are welcome to open pull requests or issues)
 - [☠️] Entity Framework (.NET) model .edmx (Fuck no, EDMX files were bloated, verbose XML nightmares—hard to merge, error-prone, and a pain in teams. Microsoft wisely ditched them in EF Core for code-first. Classic overkill from old MS era.)
 - [✔️] TypeORM support
 - [ ] .hbm.xml / schema.xml: Hibernate/Propel mappings (Java/PHP) (💲 Someone can do this, not me)
 - [ ] Django models.py (Python classes), Sequelize migrations (JS) (💲 Someone can do this, not me)
 - [ ] .avsc: Avro schema (JSON format for data serialization) (💲 Someone can do this, not me)
+- [✔️] GraphQL schema generation

 ## Documentation
@@ -36,7 +37,7 @@
 - [ ] Web UI for visual editing
 - [ ] REST API server mode
 - [ ] Support for NoSQL databases
-- [ ] GraphQL schema generation

 ## Performance

 - [ ] Concurrent processing for multiple tables

View File

@@ -14,7 +14,9 @@ import (
 	"git.warky.dev/wdevs/relspecgo/pkg/readers/dbml"
 	"git.warky.dev/wdevs/relspecgo/pkg/readers/dctx"
 	"git.warky.dev/wdevs/relspecgo/pkg/readers/drawdb"
+	"git.warky.dev/wdevs/relspecgo/pkg/readers/drizzle"
 	"git.warky.dev/wdevs/relspecgo/pkg/readers/gorm"
+	"git.warky.dev/wdevs/relspecgo/pkg/readers/graphql"
 	"git.warky.dev/wdevs/relspecgo/pkg/readers/json"
 	"git.warky.dev/wdevs/relspecgo/pkg/readers/pgsql"
 	"git.warky.dev/wdevs/relspecgo/pkg/readers/prisma"
@@ -25,7 +27,9 @@ import (
 	wdbml "git.warky.dev/wdevs/relspecgo/pkg/writers/dbml"
 	wdctx "git.warky.dev/wdevs/relspecgo/pkg/writers/dctx"
 	wdrawdb "git.warky.dev/wdevs/relspecgo/pkg/writers/drawdb"
+	wdrizzle "git.warky.dev/wdevs/relspecgo/pkg/writers/drizzle"
 	wgorm "git.warky.dev/wdevs/relspecgo/pkg/writers/gorm"
+	wgraphql "git.warky.dev/wdevs/relspecgo/pkg/writers/graphql"
 	wjson "git.warky.dev/wdevs/relspecgo/pkg/writers/json"
 	wpgsql "git.warky.dev/wdevs/relspecgo/pkg/writers/pgsql"
 	wprisma "git.warky.dev/wdevs/relspecgo/pkg/writers/prisma"
@@ -56,10 +60,12 @@ Input formats:
   - dbml: DBML schema files
   - dctx: DCTX schema files
   - drawdb: DrawDB JSON files
+  - graphql: GraphQL schema files (.graphql, SDL)
   - json: JSON database schema
   - yaml: YAML database schema
   - gorm: GORM model files (Go, file or directory)
   - bun: Bun model files (Go, file or directory)
+  - drizzle: Drizzle ORM schema files (TypeScript, file or directory)
   - prisma: Prisma schema files (.prisma)
   - typeorm: TypeORM entity files (TypeScript)
   - pgsql: PostgreSQL database (live connection)
@@ -68,10 +74,12 @@ Output formats:
   - dbml: DBML schema files
   - dctx: DCTX schema files
   - drawdb: DrawDB JSON files
+  - graphql: GraphQL schema files (.graphql, SDL)
   - json: JSON database schema
   - yaml: YAML database schema
   - gorm: GORM model files (Go)
   - bun: Bun model files (Go)
+  - drizzle: Drizzle ORM schema files (TypeScript)
   - prisma: Prisma schema files (.prisma)
   - typeorm: TypeORM entity files (TypeScript)
   - pgsql: PostgreSQL SQL schema
@@ -132,11 +140,11 @@ Examples:
 }

 func init() {
-	convertCmd.Flags().StringVar(&convertSourceType, "from", "", "Source format (dbml, dctx, drawdb, json, yaml, gorm, bun, prisma, typeorm, pgsql)")
+	convertCmd.Flags().StringVar(&convertSourceType, "from", "", "Source format (dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql)")
 	convertCmd.Flags().StringVar(&convertSourcePath, "from-path", "", "Source file path (for file-based formats)")
 	convertCmd.Flags().StringVar(&convertSourceConn, "from-conn", "", "Source connection string (for database formats)")
-	convertCmd.Flags().StringVar(&convertTargetType, "to", "", "Target format (dbml, dctx, drawdb, json, yaml, gorm, bun, prisma, typeorm, pgsql)")
+	convertCmd.Flags().StringVar(&convertTargetType, "to", "", "Target format (dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql)")
 	convertCmd.Flags().StringVar(&convertTargetPath, "to-path", "", "Target output path (file or directory)")
 	convertCmd.Flags().StringVar(&convertPackageName, "package", "", "Package name (for code generation formats like gorm/bun)")
 	convertCmd.Flags().StringVar(&convertSchemaFilter, "schema", "", "Filter to a specific schema by name (required for formats like dctx that only support single schemas)")
@@ -257,6 +265,12 @@ func readDatabaseForConvert(dbType, filePath, connString string) (*models.Databa
 		}
 		reader = bun.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "drizzle":
+		if filePath == "" {
+			return nil, fmt.Errorf("file path is required for Drizzle format")
+		}
+		reader = drizzle.NewReader(&readers.ReaderOptions{FilePath: filePath})
 	case "prisma":
 		if filePath == "" {
 			return nil, fmt.Errorf("file path is required for Prisma format")
@@ -269,6 +283,12 @@ func readDatabaseForConvert(dbType, filePath, connString string) (*models.Databa
 		}
 		reader = typeorm.NewReader(&readers.ReaderOptions{FilePath: filePath})
+	case "graphql", "gql":
+		if filePath == "" {
+			return nil, fmt.Errorf("file path is required for GraphQL format")
+		}
+		reader = graphql.NewReader(&readers.ReaderOptions{FilePath: filePath})
 	default:
 		return nil, fmt.Errorf("unsupported source format: %s", dbType)
 	}
@@ -317,6 +337,9 @@ func writeDatabase(db *models.Database, dbType, outputPath, packageName, schemaF
 		}
 		writer = wbun.NewWriter(writerOpts)
+	case "drizzle":
+		writer = wdrizzle.NewWriter(writerOpts)

 	case "pgsql", "postgres", "postgresql", "sql":
 		writer = wpgsql.NewWriter(writerOpts)
@@ -326,6 +349,9 @@ func writeDatabase(db *models.Database, dbType, outputPath, packageName, schemaF
 	case "typeorm":
 		writer = wtypeorm.NewWriter(writerOpts)
+	case "graphql", "gql":
+		writer = wgraphql.NewWriter(writerOpts)
 	default:
 		return fmt.Errorf("unsupported target format: %s", dbType)
 	}

View File

@@ -1,71 +0,0 @@
#!/bin/bash

# Ask if the user wants to make a release version
read -p "Do you want to make a release version? (y/n): " make_release

if [[ $make_release =~ ^[Yy]$ ]]; then
    # Get the latest tag from git
    latest_tag=$(git describe --tags --abbrev=0 2>/dev/null)

    if [ -z "$latest_tag" ]; then
        # No tags exist yet, start with v1.0.0
        suggested_version="v1.0.0"
        echo "No existing tags found. Starting with $suggested_version"
    else
        echo "Latest tag: $latest_tag"
        # Remove 'v' prefix if present
        version_number="${latest_tag#v}"
        # Split version into major.minor.patch
        IFS='.' read -r major minor patch <<< "$version_number"
        # Increment patch version
        patch=$((patch + 1))
        # Construct new version
        suggested_version="v${major}.${minor}.${patch}"
        echo "Suggested next version: $suggested_version"
    fi

    # Ask the user for the version number with the suggested version as default
    read -p "Enter the version number (press Enter for $suggested_version): " version

    # Use suggested version if user pressed Enter without input
    if [ -z "$version" ]; then
        version="$suggested_version"
    fi

    # Prepend 'v' to the version if it doesn't start with it
    if ! [[ $version =~ ^v ]]; then
        version="v$version"
    fi

    # Get commit logs since the last tag
    if [ -z "$latest_tag" ]; then
        # No previous tag, get all commits
        commit_logs=$(git log --pretty=format:"- %s" --no-merges)
    else
        # Get commits since the last tag
        commit_logs=$(git log "${latest_tag}..HEAD" --pretty=format:"- %s" --no-merges)
    fi

    # Create the tag message
    if [ -z "$commit_logs" ]; then
        tag_message="Release $version"
    else
        tag_message="Release $version

${commit_logs}"
    fi

    # Create an annotated tag with the commit logs
    git tag -a "$version" -m "$tag_message"

    # Push the tag to the remote repository
    git push origin "$version"

    echo "Tag $version created and pushed to the remote repository."
else
    echo "No release version created."
fi

View File

@@ -2,7 +2,13 @@ package models

 import "encoding/xml"

-// DCTXDictionary represents the root element of a DCTX file
+// DCTX File Format Models
+//
+// This file defines the data structures for parsing and generating DCTX
+// (Data Dictionary) XML files, which are used by Clarion development tools
+// for database schema definitions.
+
+// DCTXDictionary represents the root element of a DCTX file.
 type DCTXDictionary struct {
 	XMLName xml.Name `xml:"Dictionary"`
 	Name    string   `xml:"Name,attr"`
@@ -11,7 +17,7 @@ type DCTXDictionary struct {
 	Relations []DCTXRelation `xml:"Relation,omitempty"`
 }

-// DCTXTable represents a table definition in DCTX
+// DCTXTable represents a table definition in DCTX format.
 type DCTXTable struct {
 	Guid string `xml:"Guid,attr"`
 	Name string `xml:"Name,attr"`
@@ -25,7 +31,8 @@ type DCTXTable struct {
 	Options []DCTXOption `xml:"Option,omitempty"`
 }

-// DCTXField represents a field/column definition in DCTX
+// DCTXField represents a field/column definition in DCTX format.
+// Fields can be nested for GROUP structures.
 type DCTXField struct {
 	Guid string `xml:"Guid,attr"`
 	Name string `xml:"Name,attr"`
@@ -37,7 +44,7 @@ type DCTXField struct {
 	Options []DCTXOption `xml:"Option,omitempty"`
 }

-// DCTXKey represents an index or key definition in DCTX
+// DCTXKey represents an index or key definition in DCTX format.
 type DCTXKey struct {
 	Guid string `xml:"Guid,attr"`
 	Name string `xml:"Name,attr"`
@@ -49,7 +56,7 @@ type DCTXKey struct {
 	Components []DCTXComponent `xml:"Component"`
 }

-// DCTXComponent represents a component of a key (field reference)
+// DCTXComponent represents a component of a key, referencing a field in the index.
 type DCTXComponent struct {
 	Guid    string `xml:"Guid,attr"`
 	FieldId string `xml:"FieldId,attr,omitempty"`
@@ -57,14 +64,14 @@ type DCTXComponent struct {
 	Ascend bool `xml:"Ascend,attr,omitempty"`
 }

-// DCTXOption represents a property option in DCTX
+// DCTXOption represents a property option in DCTX format for metadata storage.
 type DCTXOption struct {
 	Property      string `xml:"Property,attr"`
 	PropertyType  string `xml:"PropertyType,attr,omitempty"`
 	PropertyValue string `xml:"PropertyValue,attr"`
 }

-// DCTXRelation represents a relationship/foreign key in DCTX
+// DCTXRelation represents a relationship/foreign key in DCTX format.
 type DCTXRelation struct {
 	Guid         string `xml:"Guid,attr"`
 	PrimaryTable string `xml:"PrimaryTable,attr"`
@@ -77,7 +84,7 @@ type DCTXRelation struct {
 	PrimaryMappings []DCTXFieldMapping `xml:"PrimaryMapping,omitempty"`
 }

-// DCTXFieldMapping represents a field mapping in a relation
+// DCTXFieldMapping represents a field mapping in a relation for multi-column foreign keys.
 type DCTXFieldMapping struct {
 	Guid  string `xml:"Guid,attr"`
 	Field string `xml:"Field,attr"`

View File

@@ -2,11 +2,14 @@ package models

 import "fmt"

-// =============================================================================
-// Flat/Denormalized Views - Flattened structures with fully qualified names
-// =============================================================================
+// Flat/Denormalized Views
+//
+// This file provides flattened data structures with fully qualified names
+// for easier querying and analysis of database schemas without navigating
+// nested hierarchies.

-// FlatColumn represents a column with full context in a single structure
+// FlatColumn represents a column with full database context in a single structure.
+// It includes fully qualified names for easy identification and querying.
 type FlatColumn struct {
 	DatabaseName string `json:"database_name" yaml:"database_name" xml:"database_name"`
 	SchemaName   string `json:"schema_name" yaml:"schema_name" xml:"schema_name"`
@@ -25,7 +28,7 @@ type FlatColumn struct {
 	Comment string `json:"comment,omitempty" yaml:"comment,omitempty" xml:"comment,omitempty"`
 }

-// ToFlatColumns converts a Database to a slice of FlatColumns
+// ToFlatColumns converts a Database to a slice of FlatColumns for denormalized access to all columns.
 func (d *Database) ToFlatColumns() []*FlatColumn {
 	flatColumns := make([]*FlatColumn, 0)
@@ -56,7 +59,7 @@ func (d *Database) ToFlatColumns() []*FlatColumn {
 	return flatColumns
 }

-// FlatTable represents a table with full context
+// FlatTable represents a table with full database context and aggregated counts.
 type FlatTable struct {
 	DatabaseName string `json:"database_name" yaml:"database_name" xml:"database_name"`
 	SchemaName   string `json:"schema_name" yaml:"schema_name" xml:"schema_name"`
@@ -70,7 +73,7 @@ type FlatTable struct {
 	IndexCount int `json:"index_count" yaml:"index_count" xml:"index_count"`
 }

-// ToFlatTables converts a Database to a slice of FlatTables
+// ToFlatTables converts a Database to a slice of FlatTables for denormalized access to all tables.
 func (d *Database) ToFlatTables() []*FlatTable {
 	flatTables := make([]*FlatTable, 0)
@@ -94,7 +97,7 @@ func (d *Database) ToFlatTables() []*FlatTable {
 	return flatTables
 }

-// FlatConstraint represents a constraint with full context
+// FlatConstraint represents a constraint with full database context and resolved references.
 type FlatConstraint struct {
 	DatabaseName string `json:"database_name" yaml:"database_name" xml:"database_name"`
 	SchemaName   string `json:"schema_name" yaml:"schema_name" xml:"schema_name"`
@@ -112,7 +115,7 @@ type FlatConstraint struct {
 	OnUpdate string `json:"on_update,omitempty" yaml:"on_update,omitempty" xml:"on_update,omitempty"`
 }

-// ToFlatConstraints converts a Database to a slice of FlatConstraints
+// ToFlatConstraints converts a Database to a slice of FlatConstraints for denormalized access to all constraints.
 func (d *Database) ToFlatConstraints() []*FlatConstraint {
 	flatConstraints := make([]*FlatConstraint, 0)
@@ -148,7 +151,7 @@ func (d *Database) ToFlatConstraints() []*FlatConstraint {
 	return flatConstraints
 }

-// FlatRelationship represents a relationship with full context
+// FlatRelationship represents a relationship with full database context and fully qualified table names.
 type FlatRelationship struct {
 	DatabaseName     string `json:"database_name" yaml:"database_name" xml:"database_name"`
 	RelationshipName string `json:"relationship_name" yaml:"relationship_name" xml:"relationship_name"`
@@ -164,7 +167,7 @@ type FlatRelationship struct {
 	Description string `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
 }

-// ToFlatRelationships converts a Database to a slice of FlatRelationships
+// ToFlatRelationships converts a Database to a slice of FlatRelationships for denormalized access to all relationships.
 func (d *Database) ToFlatRelationships() []*FlatRelationship {
 	flatRelationships := make([]*FlatRelationship, 0)
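
The flat view turns common aggregations into a single loop instead of a three-level walk. A small sketch (assuming a populated `db *models.Database` from any reader; the `pkg/models` import path is inferred from the repository layout, and only fields visible above are used):

```go
package main

import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
)

// columnsPerSchema counts columns per schema using the denormalized
// view instead of walking Database -> Schema -> Table -> Column.
func columnsPerSchema(db *models.Database) map[string]int {
	counts := make(map[string]int)
	for _, fc := range db.ToFlatColumns() {
		counts[fc.SchemaName]++
	}
	return counts
}

func main() {
	db := models.InitDatabase("example") // empty here; readers fill real schemas
	for schema, n := range columnsPerSchema(db) {
		fmt.Printf("%s: %d columns\n", schema, n)
	}
}
```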

View File

@@ -1,13 +1,19 @@
+// Package models provides the core data structures for representing database schemas.
+// It defines types for databases, schemas, tables, columns, relationships, constraints,
+// indexes, views, sequences, and other database objects. These models serve as the
+// intermediate representation for converting between various database schema formats.
 package models

 import "strings"

+// DatabaseType represents the type of database system.
 type DatabaseType string

+// Supported database types.
 const (
-	PostgresqlDatabaseType DatabaseType = "pgsql"
-	MSSQLDatabaseType      DatabaseType = "mssql"
-	SqlLiteDatabaseType    DatabaseType = "sqlite"
+	PostgresqlDatabaseType DatabaseType = "pgsql"  // PostgreSQL database
+	MSSQLDatabaseType      DatabaseType = "mssql"  // Microsoft SQL Server database
+	SqlLiteDatabaseType    DatabaseType = "sqlite" // SQLite database
 )

 // Database represents the complete database schema
@@ -21,11 +27,13 @@ type Database struct {
 	SourceFormat string `json:"source_format,omitempty" yaml:"source_format,omitempty" xml:"source_format,omitempty"` // Source Format of the database.
 }

-// SQLNamer returns the database name in lowercase
+// SQLName returns the database name in lowercase for SQL compatibility.
 func (d *Database) SQLName() string {
 	return strings.ToLower(d.Name)
 }

+// Schema represents a database schema, which is a logical grouping of database objects
+// such as tables, views, sequences, and relationships within a database.
 type Schema struct {
 	Name        string `json:"name" yaml:"name" xml:"name"`
 	Description string `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
@@ -43,11 +51,13 @@ type Schema struct {
 	Enums []*Enum `json:"enums,omitempty" yaml:"enums,omitempty" xml:"enums"`
 }

-// SQLName returns the schema name in lowercase
+// SQLName returns the schema name in lowercase for SQL compatibility.
 func (d *Schema) SQLName() string {
 	return strings.ToLower(d.Name)
 }

+// Table represents a database table with its columns, constraints, indexes,
+// and relationships. Tables are the primary data storage structures in a database.
 type Table struct {
 	Name        string `json:"name" yaml:"name" xml:"name"`
 	Description string `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
@@ -63,11 +73,12 @@ type Table struct {
 	RefSchema *Schema `json:"-" yaml:"-" xml:"-"` // Excluded to prevent circular references
 }

-// SQLName returns the table name in lowercase
+// SQLName returns the table name in lowercase for SQL compatibility.
 func (d *Table) SQLName() string {
 	return strings.ToLower(d.Name)
 }

+// GetPrimaryKey returns the primary key column for the table, or nil if none exists.
 func (m Table) GetPrimaryKey() *Column {
 	for _, column := range m.Columns {
 		if column.IsPrimaryKey {
@@ -77,6 +88,7 @@ func (m Table) GetPrimaryKey() *Column {
 	return nil
 }

+// GetForeignKeys returns all foreign key constraints for the table.
 func (m Table) GetForeignKeys() []*Constraint {
 	keys := make([]*Constraint, 0)
@@ -101,7 +113,7 @@ type View struct {
 	RefSchema *Schema `json:"-" yaml:"-" xml:"-"` // Excluded to prevent circular references
 }

-// SQLName returns the view name in lowercase
+// SQLName returns the view name in lowercase for SQL compatibility.
 func (d *View) SQLName() string {
 	return strings.ToLower(d.Name)
 }
@@ -124,7 +136,7 @@ type Sequence struct {
 	RefSchema *Schema `json:"-" yaml:"-" xml:"-"` // Excluded to prevent circular references
 }

-// SQLName returns the sequence name in lowercase
+// SQLName returns the sequence name in lowercase for SQL compatibility.
 func (d *Sequence) SQLName() string {
 	return strings.ToLower(d.Name)
 }
@@ -148,11 +160,13 @@ type Column struct {
 	Sequence uint `json:"sequence,omitempty" yaml:"sequence,omitempty" xml:"sequence,omitempty"`
 }

-// SQLName returns the table name in lowercase
+// SQLName returns the column name in lowercase for SQL compatibility.
 func (d *Column) SQLName() string {
 	return strings.ToLower(d.Name)
 }

+// Index represents a database index for optimizing query performance.
+// Indexes can be unique, partial, or include additional columns.
 type Index struct {
 	Name        string `json:"name" yaml:"name" xml:"name"`
 	Description string `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
@@ -168,19 +182,23 @@ type Index struct {
 	Sequence uint `json:"sequence,omitempty" yaml:"sequence,omitempty" xml:"sequence,omitempty"`
 }

-// SQLName returns the Indexin lowercase
+// SQLName returns the index name in lowercase for SQL compatibility.
 func (d *Index) SQLName() string {
 	return strings.ToLower(d.Name)
 }

+// RelationType represents the type of relationship between database tables.
 type RelationType string

+// Supported relationship types.
 const (
-	OneToOne   RelationType = "one_to_one"
-	OneToMany  RelationType = "one_to_many"
-	ManyToMany RelationType = "many_to_many"
+	OneToOne   RelationType = "one_to_one"   // One record in table A relates to one record in table B
+	OneToMany  RelationType = "one_to_many"  // One record in table A relates to many records in table B
+	ManyToMany RelationType = "many_to_many" // Many records in table A relate to many records in table B
 )

+// Relationship represents a relationship between two database tables.
+// Relationships can be one-to-one, one-to-many, or many-to-many.
 type Relationship struct {
 	Name string       `json:"name" yaml:"name" xml:"name"`
 	Type RelationType `json:"type" yaml:"type" xml:"type"`
@@ -198,11 +216,13 @@ type Relationship struct {
 	Sequence uint `json:"sequence,omitempty" yaml:"sequence,omitempty" xml:"sequence,omitempty"`
 }

-// SQLName returns the Relationship lowercase
+// SQLName returns the relationship name in lowercase for SQL compatibility.
 func (d *Relationship) SQLName() string {
 	return strings.ToLower(d.Name)
 }

+// Constraint represents a database constraint that enforces data integrity rules.
+// Constraints can be primary keys, foreign keys, unique constraints, check constraints, or not-null constraints.
 type Constraint struct {
 	Name string         `json:"name" yaml:"name" xml:"name"`
 	Type ConstraintType `json:"type" yaml:"type" xml:"type"`
@@ -220,30 +240,37 @@ type Constraint struct {
 	Sequence uint `json:"sequence,omitempty" yaml:"sequence,omitempty" xml:"sequence,omitempty"`
 }

+// SQLName returns the constraint name in lowercase for SQL compatibility.
 func (d *Constraint) SQLName() string {
 	return strings.ToLower(d.Name)
 }

+// ConstraintType represents the type of database constraint.
 type ConstraintType string

+// Enum represents a database enumeration type with a set of allowed values.
 type Enum struct {
 	Name   string   `json:"name" yaml:"name" xml:"name"`
 	Values []string `json:"values" yaml:"values" xml:"values"`
 	Schema string   `json:"schema,omitempty" yaml:"schema,omitempty" xml:"schema,omitempty"`
 }

+// SQLName returns the enum name in lowercase for SQL compatibility.
 func (d *Enum) SQLName() string {
 	return strings.ToLower(d.Name)
 }

+// Supported constraint types.
 const (
-	PrimaryKeyConstraint ConstraintType = "primary_key"
-	ForeignKeyConstraint ConstraintType = "foreign_key"
-	UniqueConstraint     ConstraintType = "unique"
-	CheckConstraint      ConstraintType = "check"
-	NotNullConstraint    ConstraintType = "not_null"
+	PrimaryKeyConstraint ConstraintType = "primary_key" // Primary key uniquely identifies each record
+	ForeignKeyConstraint ConstraintType = "foreign_key" // Foreign key references another table
+	UniqueConstraint     ConstraintType = "unique"      // Unique constraint ensures all values are different
+	CheckConstraint      ConstraintType = "check"       // Check constraint validates data against an expression
+	NotNullConstraint    ConstraintType = "not_null"    // Not null constraint requires a value
 )

+// Script represents a database migration or initialization script.
+// Scripts can have dependencies and rollback capabilities.
 type Script struct {
 	Name        string `json:"name" yaml:"name" xml:"name"`
 	Description string `json:"description" yaml:"description" xml:"description"`
@@ -256,11 +283,12 @@ type Script struct {
 	Sequence uint `json:"sequence,omitempty" yaml:"sequence,omitempty" xml:"sequence,omitempty"`
 }

+// SQLName returns the script name in lowercase for SQL compatibility.
 func (d *Script) SQLName() string {
 	return strings.ToLower(d.Name)
 }

-// Initialize functions
+// Initialization functions for creating new model instances with proper defaults.

 // InitDatabase initializes a new Database with empty slices
 func InitDatabase(name string) *Database {
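
A short sketch of the two table helpers in use (assuming a populated `*models.Table`, e.g. obtained from a reader; the `pkg/models` import path is inferred from the repository layout):

```go
package main

import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
)

// describeTable prints the primary key and foreign keys of a table
// using the helpers documented above.
func describeTable(t *models.Table) {
	if pk := t.GetPrimaryKey(); pk != nil {
		fmt.Printf("%s: primary key %s\n", t.SQLName(), pk.SQLName())
	} else {
		fmt.Printf("%s: no primary key\n", t.SQLName())
	}
	for _, fk := range t.GetForeignKeys() {
		fmt.Printf("  fk %s -> %s\n", fk.SQLName(), fk.ReferencedTable)
	}
}
```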

View File

@@ -1,10 +1,12 @@
 package models

-// =============================================================================
-// Summary/Compact Views - Lightweight views with essential fields
-// =============================================================================
+// Summary/Compact Views
+//
+// This file provides lightweight summary structures with essential fields
+// and aggregated counts for quick database schema overviews without loading
+// full object graphs.

-// DatabaseSummary provides a compact overview of a database
+// DatabaseSummary provides a compact overview of a database with aggregated statistics.
 type DatabaseSummary struct {
 	Name        string `json:"name" yaml:"name" xml:"name"`
 	Description string `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
@@ -15,7 +17,7 @@ type DatabaseSummary struct {
 	TotalColumns int `json:"total_columns" yaml:"total_columns" xml:"total_columns"`
 }

-// ToSummary converts a Database to a DatabaseSummary
+// ToSummary converts a Database to a DatabaseSummary with calculated counts.
 func (d *Database) ToSummary() *DatabaseSummary {
 	summary := &DatabaseSummary{
 		Name: d.Name,
@@ -36,7 +38,7 @@ func (d *Database) ToSummary() *DatabaseSummary {
 	return summary
 }

-// SchemaSummary provides a compact overview of a schema
+// SchemaSummary provides a compact overview of a schema with aggregated statistics.
 type SchemaSummary struct {
 	Name        string `json:"name" yaml:"name" xml:"name"`
 	Description string `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
@@ -47,7 +49,7 @@ type SchemaSummary struct {
 	TotalConstraints int `json:"total_constraints" yaml:"total_constraints" xml:"total_constraints"`
 }

-// ToSummary converts a Schema to a SchemaSummary
+// ToSummary converts a Schema to a SchemaSummary with calculated counts.
 func (s *Schema) ToSummary() *SchemaSummary {
 	summary := &SchemaSummary{
 		Name: s.Name,
@@ -66,7 +68,7 @@ func (s *Schema) ToSummary() *SchemaSummary {
 	return summary
 }

-// TableSummary provides a compact overview of a table
+// TableSummary provides a compact overview of a table with aggregated statistics.
 type TableSummary struct {
 	Name   string `json:"name" yaml:"name" xml:"name"`
 	Schema string `json:"schema" yaml:"schema" xml:"schema"`
@@ -79,7 +81,7 @@ type TableSummary struct {
 	ForeignKeyCount int `json:"foreign_key_count" yaml:"foreign_key_count" xml:"foreign_key_count"`
 }

-// ToSummary converts a Table to a TableSummary
+// ToSummary converts a Table to a TableSummary with calculated counts.
 func (t *Table) ToSummary() *TableSummary {
 	summary := &TableSummary{
 		Name: t.Name,
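
As with the flat views, the summaries are cheap to derive from a loaded database. A usage sketch (same assumption about the `pkg/models` import path; only fields visible above are printed):

```go
package main

import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
)

// printOverview derives the aggregated counts for a whole schema tree
// with a single ToSummary call.
func printOverview(db *models.Database) {
	s := db.ToSummary()
	fmt.Printf("%s: %d columns in total\n", s.Name, s.TotalColumns)
}
```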

pkg/readers/bun/README.md (new file, +106 lines)

@@ -0,0 +1,106 @@
# Bun Reader
Reads Go source files containing Bun model definitions and extracts database schema information.
## Overview
The Bun Reader parses Go source code files that define Bun models (structs with `bun` struct tags) and converts them into RelSpec's internal database model representation.
## Features
- Parses Bun struct tags to extract column definitions
- Extracts table names from `bun:"table:tablename"` tags
- Identifies primary keys, foreign keys, and indexes
- Supports relationship detection
- Handles both single files and directories
## Usage
### Basic Example
```go
package main
import (
"fmt"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/readers/bun"
)
func main() {
options := &readers.ReaderOptions{
FilePath: "/path/to/models.go",
}
reader := bun.NewReader(options)
db, err := reader.ReadDatabase()
if err != nil {
panic(err)
}
fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```
### CLI Example
```bash
# Read Bun models and convert to JSON
relspec --input bun --in-file models/ --output json --out-file schema.json
# Convert Bun models to GORM
relspec --input bun --in-file models.go --output gorm --out-file gorm_models.go
```
## Supported Bun Tags
The reader recognizes the following Bun struct tags:
- `table` - Table name
- `column` - Column name
- `type` - SQL data type
- `pk` - Primary key
- `notnull` - NOT NULL constraint
- `autoincrement` - Auto-increment column
- `default` - Default value
- `unique` - Unique constraint
- `rel` - Relationship definition
## Example Bun Model
```go
package models
import (
"time"
"github.com/uptrace/bun"
)
type User struct {
bun.BaseModel `bun:"table:users,alias:u"`
ID int64 `bun:"id,pk,autoincrement"`
Username string `bun:"username,notnull,unique"`
Email string `bun:"email,notnull"`
CreatedAt time.Time `bun:"created_at,notnull,default:now()"`
Posts []*Post `bun:"rel:has-many,join:id=user_id"`
}
type Post struct {
bun.BaseModel `bun:"table:posts,alias:p"`
ID int64 `bun:"id,pk"`
UserID int64 `bun:"user_id,notnull"`
Title string `bun:"title,notnull"`
Content string `bun:"content"`
User *User `bun:"rel:belongs-to,join:user_id=id"`
}
```
## Notes
- Test files (ending in `_test.go`) are automatically excluded
- The `bun.BaseModel` embedded struct is automatically recognized
- Schema defaults to `public` if not specified

View File

@@ -382,6 +382,23 @@ func (r *Reader) isRelationship(tag string) bool {
return strings.Contains(tag, "bun:\"rel:") || strings.Contains(tag, ",rel:") return strings.Contains(tag, "bun:\"rel:") || strings.Contains(tag, ",rel:")
} }
// getRelationType extracts the relationship type from a bun tag
func (r *Reader) getRelationType(bunTag string) string {
if strings.Contains(bunTag, "rel:has-many") {
return "has-many"
}
if strings.Contains(bunTag, "rel:belongs-to") {
return "belongs-to"
}
if strings.Contains(bunTag, "rel:has-one") {
return "has-one"
}
if strings.Contains(bunTag, "rel:many-to-many") {
return "many-to-many"
}
return ""
}
// parseRelationshipConstraints parses relationship fields to extract foreign key constraints
func (r *Reader) parseRelationshipConstraints(table *models.Table, structType *ast.StructType, structMap map[string]*models.Table) {
for _, field := range structType.Fields.List {
@@ -409,27 +426,51 @@ func (r *Reader) parseRelationshipConstraints(table *models.Table, structType *a
}
// Parse the join information: join:user_id=id
// This means: thisTable.user_id = referencedTable.id
joinInfo := r.parseJoinInfo(bunTag)
if joinInfo == nil {
continue
}
// Determine which table gets the FK based on relationship type
relType := r.getRelationType(bunTag)
var fkTable *models.Table
var fkColumn, refTable, refColumn string
switch strings.ToLower(relType) {
case "belongs-to":
// For belongs-to: FK is on the current table
// join:user_id=id means table.user_id references referencedTable.id
fkTable = table
fkColumn = joinInfo.ForeignKey
refTable = referencedTable.Name
refColumn = joinInfo.ReferencedKey
case "has-many":
// For has-many: FK is on the referenced table
// join:id=user_id means referencedTable.user_id references table.id
fkTable = referencedTable
fkColumn = joinInfo.ReferencedKey
refTable = table.Name
refColumn = joinInfo.ForeignKey
default:
continue
}
constraint := &models.Constraint{
Name: fmt.Sprintf("fk_%s_%s", fkTable.Name, refTable),
Type: models.ForeignKeyConstraint,
Table: fkTable.Name,
Schema: fkTable.Schema,
Columns: []string{fkColumn},
ReferencedTable: refTable,
ReferencedSchema: fkTable.Schema,
ReferencedColumns: []string{refColumn},
OnDelete: "NO ACTION", // Bun doesn't specify this in tags
OnUpdate: "NO ACTION",
}
fkTable.Constraints[constraint.Name] = constraint
}
}

pkg/readers/dbml/README.md Normal file

@@ -0,0 +1,101 @@
# DBML Reader
Reads Database Markup Language (DBML) files and extracts database schema information.
## Overview
The DBML Reader parses `.dbml` files that define database schemas using the DBML syntax (used by dbdiagram.io) and converts them into RelSpec's internal database model representation.
## Features
- Parses DBML syntax
- Extracts tables, columns, and relationships
- Supports DBML-specific features:
- Table groups and notes
- Enum definitions
- Indexes
- Foreign key relationships
## Usage
### Basic Example
```go
package main
import (
"fmt"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/readers/dbml"
)
func main() {
options := &readers.ReaderOptions{
FilePath: "/path/to/schema.dbml",
}
reader := dbml.NewReader(options)
db, err := reader.ReadDatabase()
if err != nil {
panic(err)
}
fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```
### CLI Example
```bash
# Read DBML file and convert to JSON
relspec --input dbml --in-file schema.dbml --output json --out-file schema.json
# Convert DBML to GORM models
relspec --input dbml --in-file database.dbml --output gorm --out-file models.go
```
## Example DBML File
```dbml
Table users {
id bigserial [pk, increment]
username varchar(50) [not null, unique]
email varchar(100) [not null]
created_at timestamp [not null, default: `now()`]
Note: 'Users table'
}
Table posts {
id bigserial [pk]
user_id bigint [not null, ref: > users.id]
title varchar(200) [not null]
content text
indexes {
user_id
(user_id, created_at) [name: 'idx_user_posts']
}
}
Ref: posts.user_id > users.id [delete: cascade]
```
## DBML Features Supported
- Table definitions with columns
- Primary keys (`pk`)
- Not null constraints (`not null`)
- Unique constraints (`unique`)
- Default values (`default`)
- Inline references (`ref`)
- Standalone `Ref` blocks
- Indexes and composite indexes
- Table notes and column notes
- Enums
## Notes
- DBML is designed for database documentation and diagramming
- Schema name defaults to `public`
- Relationship cardinality is preserved

pkg/readers/dctx/README.md Normal file

@@ -0,0 +1,96 @@
# DCTX Reader
Reads Clarion database dictionary (DCTX) files and extracts database schema information.
## Overview
The DCTX Reader parses Clarion dictionary files (`.dctx`) that define database structures in the Clarion development system and converts them into RelSpec's internal database model representation.
## Features
- Parses Clarion DCTX XML format
- Extracts file (table) and field (column) definitions
- Supports Clarion data types
- Handles keys (indexes) and relationships
## Usage
### Basic Example
```go
package main
import (
"fmt"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/readers/dctx"
)
func main() {
options := &readers.ReaderOptions{
FilePath: "/path/to/database.dctx",
}
reader := dctx.NewReader(options)
db, err := reader.ReadDatabase()
if err != nil {
panic(err)
}
fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```
### CLI Example
```bash
# Read DCTX file and convert to JSON
relspec --input dctx --in-file legacy.dctx --output json --out-file schema.json
# Convert DCTX to GORM models for migration
relspec --input dctx --in-file app.dctx --output gorm --out-file models.go
# Export DCTX to PostgreSQL DDL
relspec --input dctx --in-file database.dctx --output pgsql --out-file schema.sql
```
## Example DCTX Structure
DCTX files are XML-based Clarion dictionary files that define:
- Files (equivalent to tables)
- Fields (columns) with Clarion-specific types
- Keys (indexes)
- Relationships between files
Common Clarion data types:
- `STRING` - Fixed-length string
- `CSTRING` - C-style null-terminated string
- `LONG` - 32-bit integer
- `SHORT` - 16-bit integer
- `DECIMAL` - Decimal number
- `REAL` - Floating point
- `DATE` - Date field
- `TIME` - Time field
## Type Mapping
The reader automatically maps Clarion data types to standard SQL types (a code sketch follows the table):
| Clarion Type | SQL Type |
|--------------|----------|
| STRING | VARCHAR |
| CSTRING | VARCHAR |
| LONG | INTEGER |
| SHORT | SMALLINT |
| DECIMAL | NUMERIC |
| REAL | REAL |
| DATE | DATE |
| TIME | TIME |
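For illustration, the mapping behaves like a lookup table with a string fallback. The sketch below is an assumption about the shape of that logic, not the reader's actual code; the function name `clarionTypeToSQL` and the `VARCHAR` fallback are hypothetical:
```go
// clarionTypeToSQL sketches the Clarion-to-SQL mapping from the table above.
// Hypothetical helper: the reader's real function and fallback rule may differ.
func clarionTypeToSQL(clarionType string) string {
	typeMap := map[string]string{
		"STRING":  "VARCHAR",
		"CSTRING": "VARCHAR",
		"LONG":    "INTEGER",
		"SHORT":   "SMALLINT",
		"DECIMAL": "NUMERIC",
		"REAL":    "REAL",
		"DATE":    "DATE",
		"TIME":    "TIME",
	}
	if sqlType, ok := typeMap[clarionType]; ok {
		return sqlType
	}
	// Unknown Clarion types fall back to VARCHAR (assumption).
	return "VARCHAR"
}
```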
## Notes
- DCTX is specific to the Clarion development platform
- Useful for migrating legacy Clarion applications
- Schema name defaults to `public`
- Preserves field properties and constraints where possible

pkg/readers/drawdb/README.md Normal file

@@ -0,0 +1,96 @@
# DrawDB Reader
Reads DrawDB schema files and extracts database schema information.
## Overview
The DrawDB Reader parses JSON files exported from DrawDB (a free online database design tool) and converts them into RelSpec's internal database model representation.
## Features
- Parses DrawDB JSON format
- Extracts tables, fields, and relationships
- Supports DrawDB-specific metadata
- Preserves visual layout information
## Usage
### Basic Example
```go
package main
import (
"fmt"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/readers/drawdb"
)
func main() {
options := &readers.ReaderOptions{
FilePath: "/path/to/diagram.json",
}
reader := drawdb.NewReader(options)
db, err := reader.ReadDatabase()
if err != nil {
panic(err)
}
fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```
### CLI Example
```bash
# Read DrawDB export and convert to JSON schema
relspec --input drawdb --in-file diagram.json --output json --out-file schema.json
# Convert DrawDB design to GORM models
relspec --input drawdb --in-file design.json --output gorm --out-file models.go
```
## Example DrawDB Export
DrawDB exports database designs as JSON files containing:
```json
{
"tables": [
{
"id": "1",
"name": "users",
"fields": [
{
"name": "id",
"type": "BIGINT",
"primary": true,
"autoIncrement": true
},
{
"name": "username",
"type": "VARCHAR",
"size": 50,
"notNull": true,
"unique": true
}
]
}
],
"relationships": [
{
"source": "posts",
"target": "users",
"type": "many-to-one"
}
]
}
```
## Notes
- DrawDB is a free online database designer at drawdb.vercel.app
- Export format preserves visual design metadata
- Useful for converting visual designs to code
- Schema defaults to `public`

pkg/readers/drizzle/README.md Normal file

@@ -0,0 +1,90 @@
# Drizzle Reader
Reads TypeScript/JavaScript files containing Drizzle ORM schema definitions and extracts database schema information.
## Overview
The Drizzle Reader parses Drizzle ORM schema files (TypeScript/JavaScript) that define database tables using Drizzle's schema builder and converts them into RelSpec's internal database model representation.
## Features
- Parses Drizzle schema definitions
- Extracts table, column, and relationship information
- Supports various Drizzle column types
- Handles constraints and indexes
## Usage
### Basic Example
```go
package main
import (
"fmt"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/readers/drizzle"
)
func main() {
options := &readers.ReaderOptions{
FilePath: "/path/to/schema.ts",
}
reader := drizzle.NewReader(options)
db, err := reader.ReadDatabase()
if err != nil {
panic(err)
}
fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```
### CLI Example
```bash
# Read Drizzle schema and convert to JSON
relspec --input drizzle --in-file schema.ts --output json --out-file schema.json
# Convert Drizzle to GORM models
relspec --input drizzle --in-file schema/ --output gorm --out-file models.go
```
## Example Drizzle Schema
```typescript
import { pgTable, serial, varchar, text, timestamp, integer } from 'drizzle-orm/pg-core';
import { relations } from 'drizzle-orm';
export const users = pgTable('users', {
id: serial('id').primaryKey(),
username: varchar('username', { length: 50 }).notNull().unique(),
email: varchar('email', { length: 100 }).notNull(),
createdAt: timestamp('created_at').notNull().defaultNow(),
});
export const posts = pgTable('posts', {
id: serial('id').primaryKey(),
userId: integer('user_id').notNull().references(() => users.id, { onDelete: 'cascade' }),
title: varchar('title', { length: 200 }).notNull(),
content: text('content'),
});
export const usersRelations = relations(users, ({ many }) => ({
posts: many(posts),
}));
export const postsRelations = relations(posts, ({ one }) => ({
user: one(users, {
fields: [posts.userId],
references: [users.id],
}),
}));
```
## Notes
- Parses PostgreSQL-style schemas (`pgTable`, `pgEnum`)
- Foreign key relationships are extracted from `.references()` column modifiers
- Schema defaults to `public` for PostgreSQL


@@ -0,0 +1,619 @@
package drizzle
import (
"bufio"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
)
// Reader implements the readers.Reader interface for Drizzle schema format
type Reader struct {
options *readers.ReaderOptions
}
// NewReader creates a new Drizzle reader with the given options
func NewReader(options *readers.ReaderOptions) *Reader {
return &Reader{
options: options,
}
}
// ReadDatabase reads and parses Drizzle schema input, returning a Database model
func (r *Reader) ReadDatabase() (*models.Database, error) {
if r.options.FilePath == "" {
return nil, fmt.Errorf("file path is required for Drizzle reader")
}
// Check if it's a file or directory
info, err := os.Stat(r.options.FilePath)
if err != nil {
return nil, fmt.Errorf("failed to stat path: %w", err)
}
if info.IsDir() {
// Read all .ts files in the directory
return r.readDirectory(r.options.FilePath)
}
// Read single file
content, err := os.ReadFile(r.options.FilePath)
if err != nil {
return nil, fmt.Errorf("failed to read file: %w", err)
}
return r.parseDrizzle(string(content))
}
// ReadSchema reads and parses Drizzle schema input, returning a Schema model
func (r *Reader) ReadSchema() (*models.Schema, error) {
db, err := r.ReadDatabase()
if err != nil {
return nil, err
}
if len(db.Schemas) == 0 {
return nil, fmt.Errorf("no schemas found in Drizzle schema")
}
// Return the first schema
return db.Schemas[0], nil
}
// ReadTable reads and parses Drizzle schema input, returning a Table model
func (r *Reader) ReadTable() (*models.Table, error) {
schema, err := r.ReadSchema()
if err != nil {
return nil, err
}
if len(schema.Tables) == 0 {
return nil, fmt.Errorf("no tables found in Drizzle schema")
}
// Return the first table
return schema.Tables[0], nil
}
// readDirectory reads all .ts files in a directory and parses them
func (r *Reader) readDirectory(dirPath string) (*models.Database, error) {
db := models.InitDatabase("database")
if r.options.Metadata != nil {
if name, ok := r.options.Metadata["name"].(string); ok {
db.Name = name
}
}
// Default schema for Drizzle
schema := models.InitSchema("public")
schema.Enums = make([]*models.Enum, 0)
// Read all .ts files
files, err := filepath.Glob(filepath.Join(dirPath, "*.ts"))
if err != nil {
return nil, fmt.Errorf("failed to glob directory: %w", err)
}
// Parse each file
for _, file := range files {
content, err := os.ReadFile(file)
if err != nil {
return nil, fmt.Errorf("failed to read file %s: %w", file, err)
}
// Parse and merge into schema
fileDB, err := r.parseDrizzle(string(content))
if err != nil {
return nil, fmt.Errorf("failed to parse file %s: %w", file, err)
}
// Merge schemas
if len(fileDB.Schemas) > 0 {
fileSchema := fileDB.Schemas[0]
schema.Tables = append(schema.Tables, fileSchema.Tables...)
schema.Enums = append(schema.Enums, fileSchema.Enums...)
}
}
db.Schemas = append(db.Schemas, schema)
return db, nil
}
// parseDrizzle parses Drizzle schema content and returns a Database model
func (r *Reader) parseDrizzle(content string) (*models.Database, error) {
db := models.InitDatabase("database")
if r.options.Metadata != nil {
if name, ok := r.options.Metadata["name"].(string); ok {
db.Name = name
}
}
// Default schema for Drizzle (PostgreSQL)
schema := models.InitSchema("public")
schema.Enums = make([]*models.Enum, 0)
db.DatabaseType = models.PostgresqlDatabaseType
scanner := bufio.NewScanner(strings.NewReader(content))
// Regex patterns
// Match: export const users = pgTable('users', {
pgTableRegex := regexp.MustCompile(`export\s+const\s+(\w+)\s*=\s*pgTable\s*\(\s*['"](\w+)['"]`)
// Match: export const userRole = pgEnum('UserRole', ['admin', 'user']);
pgEnumRegex := regexp.MustCompile(`export\s+const\s+(\w+)\s*=\s*pgEnum\s*\(\s*['"](\w+)['"]`)
// State tracking
var currentTable *models.Table
var currentTableVarName string
var inTableBlock bool
var blockDepth int
var tableLines []string
for scanner.Scan() {
line := scanner.Text()
trimmed := strings.TrimSpace(line)
// Skip empty lines and comments
if trimmed == "" || strings.HasPrefix(trimmed, "//") {
continue
}
// Check for pgEnum definition
if matches := pgEnumRegex.FindStringSubmatch(trimmed); matches != nil {
enum := r.parsePgEnum(trimmed, matches)
if enum != nil {
schema.Enums = append(schema.Enums, enum)
}
continue
}
// Check for pgTable definition
if matches := pgTableRegex.FindStringSubmatch(trimmed); matches != nil {
varName := matches[1]
tableName := matches[2]
currentTableVarName = varName
currentTable = models.InitTable(tableName, "public")
inTableBlock = true
// Count braces in the first line
blockDepth = strings.Count(line, "{") - strings.Count(line, "}")
tableLines = []string{line}
continue
}
// If we're in a table block, accumulate lines
if inTableBlock {
tableLines = append(tableLines, line)
// Track brace depth
blockDepth += strings.Count(line, "{")
blockDepth -= strings.Count(line, "}")
// Check if we've closed the table definition
if blockDepth < 0 || (blockDepth == 0 && strings.Contains(line, ");")) {
// Parse the complete table block
if currentTable != nil {
r.parseTableBlock(tableLines, currentTable, currentTableVarName)
schema.Tables = append(schema.Tables, currentTable)
currentTable = nil
}
inTableBlock = false
tableLines = nil
}
}
}
db.Schemas = append(db.Schemas, schema)
return db, nil
}
// parsePgEnum parses a pgEnum definition
func (r *Reader) parsePgEnum(line string, matches []string) *models.Enum {
// matches[1] = variable name
// matches[2] = enum name
enumName := matches[2]
// Extract values from the array
// Example: pgEnum('UserRole', ['admin', 'user', 'guest'])
valuesRegex := regexp.MustCompile(`\[(.*?)\]`)
valuesMatch := valuesRegex.FindStringSubmatch(line)
if valuesMatch == nil {
return nil
}
valuesStr := valuesMatch[1]
// Split by comma and clean up
valueParts := strings.Split(valuesStr, ",")
values := make([]string, 0)
for _, part := range valueParts {
// Remove quotes and whitespace
cleaned := strings.TrimSpace(part)
cleaned = strings.Trim(cleaned, "'\"")
if cleaned != "" {
values = append(values, cleaned)
}
}
return &models.Enum{
Name: enumName,
Values: values,
Schema: "public",
}
}
// parseTableBlock parses a complete pgTable definition block
func (r *Reader) parseTableBlock(lines []string, table *models.Table, tableVarName string) {
// Join all lines into a single string for easier parsing
fullText := strings.Join(lines, "\n")
// Extract the columns block and index callback separately
// The structure is: pgTable('name', { columns }, (table) => [indexes])
// Find the main object block (columns)
columnsStart := strings.Index(fullText, "{")
if columnsStart == -1 {
return
}
// Find matching closing brace for columns
depth := 0
columnsEnd := -1
for i := columnsStart; i < len(fullText); i++ {
if fullText[i] == '{' {
depth++
} else if fullText[i] == '}' {
depth--
if depth == 0 {
columnsEnd = i
break
}
}
}
if columnsEnd == -1 {
return
}
columnsBlock := fullText[columnsStart+1 : columnsEnd]
// Parse columns
r.parseColumnsBlock(columnsBlock, table, tableVarName)
// Check for index callback: , (table) => [ or , ({ col1, col2 }) => [
// Match: }, followed by arrow function with any parameters
// Use (?s) flag to make . match newlines
indexCallbackRegex := regexp.MustCompile(`(?s)}\s*,\s*\(.*?\)\s*=>\s*\[`)
if indexCallbackRegex.MatchString(fullText[columnsEnd:]) {
// Find the index array
indexStart := strings.Index(fullText[columnsEnd:], "[")
if indexStart != -1 {
indexStart += columnsEnd
indexDepth := 0
indexEnd := -1
for i := indexStart; i < len(fullText); i++ {
if fullText[i] == '[' {
indexDepth++
} else if fullText[i] == ']' {
indexDepth--
if indexDepth == 0 {
indexEnd = i
break
}
}
}
if indexEnd != -1 {
indexBlock := fullText[indexStart+1 : indexEnd]
r.parseIndexBlock(indexBlock, table, tableVarName)
}
}
}
}
// parseColumnsBlock parses the columns block of a table
func (r *Reader) parseColumnsBlock(block string, table *models.Table, tableVarName string) {
// Split by lines and parse each column definition
lines := strings.Split(block, "\n")
for _, line := range lines {
trimmed := strings.TrimSpace(line)
if trimmed == "" || strings.HasPrefix(trimmed, "//") {
continue
}
// Match: fieldName: columnType('columnName').modifier().modifier(),
// Example: id: integer('id').primaryKey(),
columnRegex := regexp.MustCompile(`(\w+):\s*(\w+)\s*\(`)
matches := columnRegex.FindStringSubmatch(trimmed)
if matches == nil {
continue
}
fieldName := matches[1]
columnType := matches[2]
// Parse the column definition
col := r.parseColumnDefinition(trimmed, fieldName, columnType, table)
if col != nil {
table.Columns[col.Name] = col
}
}
}
// parseColumnDefinition parses a single column definition line
func (r *Reader) parseColumnDefinition(line, fieldName, drizzleType string, table *models.Table) *models.Column {
// Check for enum column syntax: pgEnum('EnumName')('column_name')
enumRegex := regexp.MustCompile(`pgEnum\s*\(['"](\w+)['"]\)\s*\(['"](\w+)['"]\)`)
if enumMatch := enumRegex.FindStringSubmatch(line); enumMatch != nil {
enumName := enumMatch[1]
columnName := enumMatch[2]
column := models.InitColumn(columnName, table.Name, table.Schema)
column.Type = enumName
column.NotNull = false
// Parse modifiers
r.parseColumnModifiers(line, column, table)
return column
}
// Extract column name from the first argument
// Example: integer('id')
nameRegex := regexp.MustCompile(`\w+\s*\(['"](\w+)['"]\)`)
nameMatch := nameRegex.FindStringSubmatch(line)
if nameMatch == nil {
return nil
}
columnName := nameMatch[1]
column := models.InitColumn(columnName, table.Name, table.Schema)
// Map Drizzle type to SQL type
column.Type = r.drizzleTypeToSQL(drizzleType)
// Default: columns are nullable unless specified
column.NotNull = false
// Parse modifiers
r.parseColumnModifiers(line, column, table)
return column
}
// drizzleTypeToSQL converts Drizzle column types to SQL types
func (r *Reader) drizzleTypeToSQL(drizzleType string) string {
typeMap := map[string]string{
// Integer types
"integer": "integer",
"bigint": "bigint",
"smallint": "smallint",
// Serial types
"serial": "serial",
"bigserial": "bigserial",
"smallserial": "smallserial",
// Numeric types
"numeric": "numeric",
"real": "real",
"doublePrecision": "double precision",
// Character types
"text": "text",
"varchar": "varchar",
"char": "char",
// Boolean
"boolean": "boolean",
// Binary
"bytea": "bytea",
// JSON
"json": "json",
"jsonb": "jsonb",
// Date/Time
"time": "time",
"timestamp": "timestamp",
"date": "date",
"interval": "interval",
// UUID
"uuid": "uuid",
// Geometric
"point": "point",
"line": "line",
}
if sqlType, ok := typeMap[drizzleType]; ok {
return sqlType
}
// If not found, might be an enum - return as-is
return drizzleType
}
// parseColumnModifiers parses column modifiers like .primaryKey(), .notNull(), etc.
func (r *Reader) parseColumnModifiers(line string, column *models.Column, table *models.Table) {
// Check for .primaryKey()
if strings.Contains(line, ".primaryKey()") {
column.IsPrimaryKey = true
column.NotNull = true
}
// Check for .notNull()
if strings.Contains(line, ".notNull()") {
column.NotNull = true
}
// Check for .unique()
if strings.Contains(line, ".unique()") {
uniqueConstraint := models.InitConstraint(
fmt.Sprintf("uq_%s", column.Name),
models.UniqueConstraint,
)
uniqueConstraint.Schema = table.Schema
uniqueConstraint.Table = table.Name
uniqueConstraint.Columns = []string{column.Name}
table.Constraints[uniqueConstraint.Name] = uniqueConstraint
}
// Check for .default(...)
// Need to handle nested backticks and parentheses in SQL expressions
defaultIdx := strings.Index(line, ".default(")
if defaultIdx != -1 {
start := defaultIdx + len(".default(")
depth := 1
inBacktick := false
i := start
for i < len(line) && depth > 0 {
ch := line[i]
if ch == '`' {
inBacktick = !inBacktick
} else if !inBacktick {
switch ch {
case '(':
depth++
case ')':
depth--
}
}
i++
}
if depth == 0 {
defaultValue := strings.TrimSpace(line[start : i-1])
r.parseDefaultValue(defaultValue, column)
}
}
// Check for .generatedAlwaysAsIdentity()
if strings.Contains(line, ".generatedAlwaysAsIdentity()") {
column.AutoIncrement = true
}
// Check for .references(() => otherTable.column)
referencesRegex := regexp.MustCompile(`\.references\(\(\)\s*=>\s*(\w+)\.(\w+)\)`)
if matches := referencesRegex.FindStringSubmatch(line); matches != nil {
refTableVar := matches[1]
refColumn := matches[2]
// Create FK constraint
constraintName := fmt.Sprintf("fk_%s_%s", table.Name, column.Name)
constraint := models.InitConstraint(constraintName, models.ForeignKeyConstraint)
constraint.Schema = table.Schema
constraint.Table = table.Name
constraint.Columns = []string{column.Name}
constraint.ReferencedSchema = table.Schema // Assume same schema
constraint.ReferencedTable = r.varNameToTableName(refTableVar)
constraint.ReferencedColumns = []string{refColumn}
table.Constraints[constraint.Name] = constraint
}
}
// parseDefaultValue parses a default value expression
func (r *Reader) parseDefaultValue(defaultExpr string, column *models.Column) {
defaultExpr = strings.TrimSpace(defaultExpr)
// Handle SQL expressions like sql`now()`
sqlRegex := regexp.MustCompile("sql`([^`]+)`")
if match := sqlRegex.FindStringSubmatch(defaultExpr); match != nil {
column.Default = match[1]
return
}
// Handle boolean values
if defaultExpr == "true" {
column.Default = true
return
}
if defaultExpr == "false" {
column.Default = false
return
}
// Handle string literals
if strings.HasPrefix(defaultExpr, "'") && strings.HasSuffix(defaultExpr, "'") {
column.Default = defaultExpr[1 : len(defaultExpr)-1]
return
}
if strings.HasPrefix(defaultExpr, "\"") && strings.HasSuffix(defaultExpr, "\"") {
column.Default = defaultExpr[1 : len(defaultExpr)-1]
return
}
// Try to parse as number
column.Default = defaultExpr
}
// parseIndexBlock parses the index callback block
func (r *Reader) parseIndexBlock(block string, table *models.Table, tableVarName string) {
// Split by lines
lines := strings.Split(block, "\n")
for _, line := range lines {
trimmed := strings.TrimSpace(line)
if trimmed == "" || strings.HasPrefix(trimmed, "//") {
continue
}
// Match: index('index_name').on(table.col1, table.col2)
// or: uniqueIndex('index_name').on(table.col1, table.col2)
indexRegex := regexp.MustCompile(`(uniqueIndex|index)\s*\(['"](\w+)['"]\)\s*\.on\s*\((.*?)\)`)
matches := indexRegex.FindStringSubmatch(trimmed)
if matches == nil {
continue
}
indexType := matches[1]
indexName := matches[2]
columnsStr := matches[3]
// Parse column list
columnParts := strings.Split(columnsStr, ",")
columns := make([]string, 0)
for _, part := range columnParts {
// Remove table prefix: table.column -> column
cleaned := strings.TrimSpace(part)
if strings.Contains(cleaned, ".") {
parts := strings.Split(cleaned, ".")
cleaned = parts[len(parts)-1]
}
columns = append(columns, cleaned)
}
if indexType == "uniqueIndex" {
// Create unique constraint
constraint := models.InitConstraint(indexName, models.UniqueConstraint)
constraint.Schema = table.Schema
constraint.Table = table.Name
constraint.Columns = columns
table.Constraints[constraint.Name] = constraint
} else {
// Create index
index := models.InitIndex(indexName, table.Name, table.Schema)
index.Columns = columns
index.Unique = false
table.Indexes[index.Name] = index
}
}
}
// varNameToTableName converts a variable name to a table name
// For now, just return as-is (could add inflection later)
func (r *Reader) varNameToTableName(varName string) string {
// TODO: Could add conversion logic here if needed
// For now, assume variable name matches table name
return varName
}

pkg/readers/gorm/README.md Normal file

@@ -0,0 +1,141 @@
# GORM Reader
Reads Go source files containing GORM model definitions and extracts database schema information.
## Overview
The GORM Reader parses Go source code files that define GORM models (structs with `gorm` struct tags) and converts them into RelSpec's internal database model representation. It supports reading from individual files or entire directories.
## Features
- Parses GORM struct tags to extract column definitions
- Extracts table names from `TableName()` methods
- Identifies primary keys, foreign keys, and indexes
- Supports relationship detection (has-many, belongs-to)
- Handles both single files and directories
## Usage
### Basic Example
```go
package main
import (
"fmt"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/readers/gorm"
)
func main() {
// Read from a single file
options := &readers.ReaderOptions{
FilePath: "/path/to/models.go",
}
reader := gorm.NewReader(options)
db, err := reader.ReadDatabase()
if err != nil {
panic(err)
}
fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```
### Reading from Directory
```go
// Read all .go files from a directory
options := &readers.ReaderOptions{
FilePath: "/path/to/models/",
}
reader := gorm.NewReader(options)
db, err := reader.ReadDatabase()
if err != nil {
panic(err)
}
```
### CLI Example
```bash
# Read GORM models and convert to JSON
relspec --input gorm --in-file models/ --output json --out-file schema.json
# Convert GORM models to Bun
relspec --input gorm --in-file models.go --output bun --out-file bun_models.go
```
## Supported GORM Tags
The reader recognizes the following GORM struct tags:
- `column` - Column name
- `type` - SQL data type (e.g., `varchar(255)`, `bigint`)
- `primaryKey` or `primary_key` - Mark as primary key
- `not null` - NOT NULL constraint
- `autoIncrement` - Auto-increment column
- `default` - Default value
- `size` - Column size/length
- `index` - Create index
- `uniqueIndex` - Create unique index
- `unique` - Unique constraint
- `foreignKey` - Foreign key column
- `references` - Referenced column
- `constraint` - Constraint behavior (OnDelete, OnUpdate)
## Example GORM Model
```go
package models
import (
"time"
"gorm.io/gorm"
)
type ModelUser struct {
gorm.Model
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement"`
Username string `gorm:"column:username;type:varchar(50);not null;uniqueIndex"`
Email string `gorm:"column:email;type:varchar(100);not null"`
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;not null;default:now()"`
// Relationships
Posts []*ModelPost `gorm:"foreignKey:UserID;references:ID;constraint:OnDelete:CASCADE"`
}
func (ModelUser) TableName() string {
return "public.users"
}
type ModelPost struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey"`
UserID int64 `gorm:"column:user_id;type:bigint;not null"`
Title string `gorm:"column:title;type:varchar(200);not null"`
Content string `gorm:"column:content;type:text"`
// Belongs-to relationship
User *ModelUser `gorm:"foreignKey:UserID;references:ID"`
}
func (ModelPost) TableName() string {
return "public.posts"
}
```
## Notes
- Test files (ending in `_test.go`) are automatically excluded
- The `gorm.Model` embedded struct is automatically recognized and skipped
- Table names are derived from struct names if a `TableName()` method is not present
- Schema defaults to `public` if not specified in `TableName()`
- Relationships are inferred from GORM relationship tags
## Limitations
- Complex relationship types (many-to-many with join tables) may need manual verification
- Custom GORM types may not be fully supported
- Some advanced GORM features may not be captured

pkg/readers/graphql/README.md Normal file

@@ -0,0 +1,203 @@
# GraphQL Schema Reader
The GraphQL reader parses GraphQL Schema Definition Language (SDL) files and converts them into RelSpec's internal database model.
## Features
- **Standard GraphQL SDL** support (generic, non-framework-specific)
- **Type to Table mapping**: GraphQL types become database tables
- **Field to Column mapping**: GraphQL fields become table columns
- **Enum support**: GraphQL enums are preserved
- **Custom scalars**: DateTime, JSON, and Date are automatically mapped to appropriate SQL types
- **Implicit relationships**: Detects relationships from field types
- **Many-to-many support**: Creates junction tables for bidirectional array relationships
- **Configurable ID mapping**: Choose between bigint (default) or UUID for ID fields
## Supported GraphQL Features
### Built-in Scalars
- `ID` → bigint (default) or uuid (configurable)
- `String` → text
- `Int` → integer
- `Float` → double precision
- `Boolean` → boolean
### Custom Scalars
- `DateTime` → timestamp
- `JSON` → jsonb
- `Date` → date
- `Time` → time
- `Decimal` → numeric
Additional custom scalars can be mapped via metadata.
### Relationships
Relationships are inferred from field types:
```graphql
type Post {
id: ID!
title: String!
author: User! # Many-to-one (creates authorId FK column, NOT NULL)
reviewer: User # Many-to-one nullable (creates reviewerId FK column, NULL)
tags: [Tag!]! # One-to-many or many-to-many (depending on reverse)
}
type User {
id: ID!
posts: [Post!]! # Reverse of Post.author (no FK created)
}
type Tag {
id: ID!
posts: [Post!]! # Many-to-many with Post (creates PostTag junction table)
}
```
**Relationship Detection Rules:**
- Single type reference (`user: User`) → Creates FK column (e.g., `userId`)
- Array type reference (`posts: [Post!]!`) → One-to-many reverse (no FK on this table)
- Bidirectional arrays → Many-to-many (creates junction table)
### Enums
```graphql
enum Role {
ADMIN
USER
GUEST
}
type User {
role: Role!
}
```
Enums are preserved in the schema and can be used as column types.
## Usage
### Basic Usage
```go
import (
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/readers/graphql"
)
opts := &readers.ReaderOptions{
FilePath: "schema.graphql",
}
reader := graphql.NewReader(opts)
db, err := reader.ReadDatabase()
```
### With UUID ID Type
```go
opts := &readers.ReaderOptions{
FilePath: "schema.graphql",
Metadata: map[string]interface{}{
"idType": "uuid", // Map ID scalar to uuid instead of bigint
},
}
reader := graphql.NewReader(opts)
db, err := reader.ReadDatabase()
```
### With Per-Type ID Mapping
```go
opts := &readers.ReaderOptions{
FilePath: "schema.graphql",
Metadata: map[string]interface{}{
"typeIdMappings": map[string]string{
"User": "uuid", // User.id → uuid
"Post": "bigint", // Post.id → bigint
},
},
}
```
### With Custom Scalar Mappings
```go
opts := &readers.ReaderOptions{
FilePath: "schema.graphql",
Metadata: map[string]interface{}{
"customScalarMappings": map[string]string{
"Upload": "bytea",
"Decimal": "numeric(10,2)",
},
},
}
```
## CLI Usage
```bash
# Convert GraphQL to JSON
relspec convert --from graphql --from-path schema.graphql \
--to json --to-path schema.json
# Convert GraphQL to GORM models
relspec convert --from graphql --from-path schema.graphql \
--to gorm --to-path models/ --package models
# Convert GraphQL to PostgreSQL SQL
relspec convert --from graphql --from-path schema.graphql \
--to pgsql --to-path schema.sql
```
## Metadata Options
| Option | Type | Description | Default |
|--------|------|-------------|---------|
| `idType` | string | Global ID type mapping ("bigint" or "uuid") | "bigint" |
| `typeIdMappings` | map[string]string | Per-type ID mappings | {} |
| `customScalarMappings` | map[string]string | Custom scalar to SQL type mappings | {} |
| `schemaName` | string | Schema name for all tables | "public" |
## Limitations
- Only supports GraphQL SDL (Schema Definition Language), not queries or mutations
- Directives are ignored (support is reserved for future extensibility)
- Interfaces and Unions are not supported
- GraphQL's concept of "schema" is different from database schemas; all types go into a single database schema (default: "public")
## Example
**Input** (`schema.graphql`):
```graphql
scalar DateTime
enum Role {
ADMIN
USER
}
type User {
id: ID!
email: String!
role: Role!
createdAt: DateTime!
posts: [Post!]!
}
type Post {
id: ID!
title: String!
content: String
published: Boolean!
author: User!
}
```
**Result**: Database with:
- 2 tables: `User` and `Post`
- `Post` table has `authorId` foreign key to `User.id`
- `Role` enum with values: ADMIN, USER
- Custom scalar `DateTime` mapped to `timestamp`


@@ -0,0 +1,279 @@
package graphql
import (
"bufio"
"fmt"
"os"
"regexp"
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
)
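// Reader implements the readers.Reader interface for GraphQL SDL files.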
type Reader struct {
options *readers.ReaderOptions
}
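// NewReader creates a new GraphQL reader with the given options.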
func NewReader(options *readers.ReaderOptions) *Reader {
return &Reader{
options: options,
}
}
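// ReadDatabase reads and parses a GraphQL SDL file, returning a Database model.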
func (r *Reader) ReadDatabase() (*models.Database, error) {
if r.options.FilePath == "" {
return nil, fmt.Errorf("file path is required for GraphQL reader")
}
content, err := os.ReadFile(r.options.FilePath)
if err != nil {
return nil, fmt.Errorf("failed to read file: %w", err)
}
return r.parseGraphQL(string(content))
}
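// ReadSchema parses the SDL input and returns the first schema.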
func (r *Reader) ReadSchema() (*models.Schema, error) {
db, err := r.ReadDatabase()
if err != nil {
return nil, err
}
if len(db.Schemas) == 0 {
return nil, fmt.Errorf("no schemas found")
}
return db.Schemas[0], nil
}
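// ReadTable parses the SDL input and returns the first table of the first schema.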
func (r *Reader) ReadTable() (*models.Table, error) {
schema, err := r.ReadSchema()
if err != nil {
return nil, err
}
if len(schema.Tables) == 0 {
return nil, fmt.Errorf("no tables found")
}
return schema.Tables[0], nil
}
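// parseContext tracks scanner state across the multi-pass SDL parse.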
type parseContext struct {
inType bool
inEnum bool
currentType string
typeLines []string
currentEnum string
enumLines []string
customScalars map[string]bool
}
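// parseGraphQL runs three passes over the SDL: collect scalars and enums,
// parse type blocks into tables, then detect and create relationships.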
func (r *Reader) parseGraphQL(content string) (*models.Database, error) {
dbName := "database"
if r.options.Metadata != nil {
if name, ok := r.options.Metadata["name"].(string); ok {
dbName = name
}
}
db := models.InitDatabase(dbName)
schema := models.InitSchema("public")
ctx := &parseContext{
customScalars: make(map[string]bool),
}
// First pass: collect custom scalars and enums
scanner := bufio.NewScanner(strings.NewReader(content))
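// Patterns for top-level SDL declarations: scalar names, enum headers, and block-closing braces.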
scalarRegex := regexp.MustCompile(`^\s*scalar\s+(\w+)`)
enumRegex := regexp.MustCompile(`^\s*enum\s+(\w+)\s*\{`)
closingBraceRegex := regexp.MustCompile(`^\s*\}`)
for scanner.Scan() {
line := scanner.Text()
trimmed := strings.TrimSpace(line)
if trimmed == "" || strings.HasPrefix(trimmed, "#") {
continue
}
if matches := scalarRegex.FindStringSubmatch(trimmed); matches != nil {
ctx.customScalars[matches[1]] = true
continue
}
if matches := enumRegex.FindStringSubmatch(trimmed); matches != nil {
ctx.inEnum = true
ctx.currentEnum = matches[1]
ctx.enumLines = []string{}
continue
}
if closingBraceRegex.MatchString(trimmed) && ctx.inEnum {
r.parseEnum(ctx.currentEnum, ctx.enumLines, schema)
// Add enum name to custom scalars for type detection
ctx.customScalars[ctx.currentEnum] = true
ctx.inEnum = false
ctx.currentEnum = ""
ctx.enumLines = nil
continue
}
if ctx.inEnum {
ctx.enumLines = append(ctx.enumLines, line)
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("scanner error: %w", err)
}
// Second pass: parse types
scanner = bufio.NewScanner(strings.NewReader(content))
typeRegex := regexp.MustCompile(`^\s*type\s+(\w+)\s*\{`)
ctx.inType = false
ctx.inEnum = false
for scanner.Scan() {
line := scanner.Text()
trimmed := strings.TrimSpace(line)
if trimmed == "" || strings.HasPrefix(trimmed, "#") {
continue
}
if matches := typeRegex.FindStringSubmatch(trimmed); matches != nil {
ctx.inType = true
ctx.currentType = matches[1]
ctx.typeLines = []string{}
continue
}
if closingBraceRegex.MatchString(trimmed) && ctx.inType {
if err := r.parseType(ctx.currentType, ctx.typeLines, schema, ctx); err != nil {
return nil, fmt.Errorf("failed to parse type %s: %w", ctx.currentType, err)
}
ctx.inType = false
ctx.currentType = ""
ctx.typeLines = nil
continue
}
if ctx.inType {
ctx.typeLines = append(ctx.typeLines, line)
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("scanner error: %w", err)
}
db.Schemas = []*models.Schema{schema}
// Third pass: detect and create relationships
if err := r.detectAndCreateRelationships(schema, ctx); err != nil {
return nil, fmt.Errorf("failed to create relationships: %w", err)
}
return db, nil
}
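// fieldInfo captures a relation field's target type and nullability for later relationship detection.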
type fieldInfo struct {
name string
typeName string
isArray bool
isNullable bool
innerNullable bool
}
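// parseType converts a GraphQL type block into a table; scalar fields become
// columns, while relation fields are stashed in table metadata for the relationship pass.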
func (r *Reader) parseType(typeName string, lines []string, schema *models.Schema, ctx *parseContext) error {
table := models.InitTable(typeName, schema.Name)
table.Metadata = make(map[string]any)
// Store field info for relationship detection
relationFields := make(map[string]*fieldInfo)
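// Matches fields like "author: User!" or "tags: [Tag!]!"; groups capture the
// field name, optional "[", base type, inner "!", optional "]", and outer "!".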
fieldRegex := regexp.MustCompile(`^\s*(\w+)\s*:\s*(\[)?(\w+)(!)?(\])?(!)?\s*`)
for _, line := range lines {
trimmed := strings.TrimSpace(line)
if trimmed == "" || strings.HasPrefix(trimmed, "#") {
continue
}
matches := fieldRegex.FindStringSubmatch(trimmed)
if matches == nil {
continue
}
fieldName := matches[1]
hasOpenBracket := matches[2] == "["
baseType := matches[3]
innerNonNull := matches[4] == "!"
hasCloseBracket := matches[5] == "]"
outerNonNull := matches[6] == "!"
isArray := hasOpenBracket && hasCloseBracket
// Determine if this is a scalar or a relation
if r.isScalarType(baseType, ctx) {
// This is a scalar field
column := models.InitColumn(fieldName, table.Name, schema.Name)
column.Type = r.graphQLTypeToSQL(baseType, fieldName, typeName)
if isArray {
// Array of scalars: use array type
column.Type += "[]"
column.NotNull = outerNonNull
} else {
column.NotNull = innerNonNull
}
// Check if this is a primary key (convention: field named "id")
if fieldName == "id" {
column.IsPrimaryKey = true
column.AutoIncrement = true
}
table.Columns[fieldName] = column
} else {
// This is a relation field - store for later processing
relationFields[fieldName] = &fieldInfo{
name: fieldName,
typeName: baseType,
isArray: isArray,
isNullable: !innerNonNull && !isArray,
innerNullable: !innerNonNull && isArray,
}
}
}
// Store relation fields in table metadata for relationship detection
if len(relationFields) > 0 {
table.Metadata["relationFields"] = relationFields
}
schema.Tables = append(schema.Tables, table)
return nil
}
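// parseEnum builds a models.Enum from the collected enum body lines.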
func (r *Reader) parseEnum(enumName string, lines []string, schema *models.Schema) {
enum := &models.Enum{
Name: enumName,
Schema: schema.Name,
Values: make([]string, 0),
}
for _, line := range lines {
trimmed := strings.TrimSpace(line)
if trimmed == "" || strings.HasPrefix(trimmed, "#") {
continue
}
// Enum values are simple identifiers
enum.Values = append(enum.Values, trimmed)
}
schema.Enums = append(schema.Enums, enum)
}


@@ -0,0 +1,362 @@
package graphql
import (
"path/filepath"
"testing"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
)
func TestReader_ReadDatabase_Simple(t *testing.T) {
opts := &readers.ReaderOptions{
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "simple.graphql"),
}
reader := NewReader(opts)
db, err := reader.ReadDatabase()
if err != nil {
t.Fatalf("ReadDatabase() error = %v", err)
}
if len(db.Schemas) == 0 {
t.Fatal("Expected at least one schema")
}
schema := db.Schemas[0]
if schema.Name != "public" {
t.Errorf("Expected schema name 'public', got '%s'", schema.Name)
}
if len(schema.Tables) != 1 {
t.Fatalf("Expected 1 table, got %d", len(schema.Tables))
}
userTable := schema.Tables[0]
if userTable.Name != "User" {
t.Errorf("Expected table name 'User', got '%s'", userTable.Name)
}
// Verify columns
expectedColumns := map[string]struct {
sqlType string
notNull bool
isPK bool
}{
"id": {"bigint", true, true},
"email": {"text", true, false},
"name": {"text", false, false},
"age": {"integer", false, false},
"active": {"boolean", true, false},
}
if len(userTable.Columns) != len(expectedColumns) {
t.Fatalf("Expected %d columns, got %d", len(expectedColumns), len(userTable.Columns))
}
for colName, expected := range expectedColumns {
col, exists := userTable.Columns[colName]
if !exists {
t.Errorf("Expected column '%s' not found", colName)
continue
}
if col.Type != expected.sqlType {
t.Errorf("Column '%s': expected type '%s', got '%s'", colName, expected.sqlType, col.Type)
}
if col.NotNull != expected.notNull {
t.Errorf("Column '%s': expected NotNull=%v, got %v", colName, expected.notNull, col.NotNull)
}
if col.IsPrimaryKey != expected.isPK {
t.Errorf("Column '%s': expected IsPrimaryKey=%v, got %v", colName, expected.isPK, col.IsPrimaryKey)
}
}
}
func TestReader_ReadDatabase_WithRelations(t *testing.T) {
opts := &readers.ReaderOptions{
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "relations.graphql"),
}
reader := NewReader(opts)
db, err := reader.ReadDatabase()
if err != nil {
t.Fatalf("ReadDatabase() error = %v", err)
}
schema := db.Schemas[0]
if len(schema.Tables) != 2 {
t.Fatalf("Expected 2 tables, got %d", len(schema.Tables))
}
// Find Post table (should have FK to User)
var postTable *models.Table
for _, table := range schema.Tables {
if table.Name == "Post" {
postTable = table
break
}
}
if postTable == nil {
t.Fatal("Post table not found")
}
// Verify authorId FK column was created
authorIdCol, exists := postTable.Columns["authorId"]
if !exists {
t.Fatal("Expected 'authorId' FK column not found in Post table")
}
if authorIdCol.Type != "bigint" {
t.Errorf("Expected authorId type 'bigint', got '%s'", authorIdCol.Type)
}
if !authorIdCol.NotNull {
t.Error("Expected authorId to be NOT NULL")
}
// Verify FK constraint
fkConstraintFound := false
for _, constraint := range postTable.Constraints {
if constraint.Type == models.ForeignKeyConstraint {
if constraint.ReferencedTable == "User" && len(constraint.Columns) > 0 && constraint.Columns[0] == "authorId" {
fkConstraintFound = true
if constraint.OnDelete != "CASCADE" {
t.Errorf("Expected OnDelete CASCADE, got %s", constraint.OnDelete)
}
break
}
}
}
if !fkConstraintFound {
t.Error("Foreign key constraint from Post to User not found")
}
}
func TestReader_ReadDatabase_WithEnums(t *testing.T) {
opts := &readers.ReaderOptions{
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "enums.graphql"),
}
reader := NewReader(opts)
db, err := reader.ReadDatabase()
if err != nil {
t.Fatalf("ReadDatabase() error = %v", err)
}
schema := db.Schemas[0]
if len(schema.Enums) != 1 {
t.Fatalf("Expected 1 enum, got %d", len(schema.Enums))
}
roleEnum := schema.Enums[0]
if roleEnum.Name != "Role" {
t.Errorf("Expected enum name 'Role', got '%s'", roleEnum.Name)
}
expectedValues := []string{"ADMIN", "USER", "GUEST"}
if len(roleEnum.Values) != len(expectedValues) {
t.Fatalf("Expected %d enum values, got %d", len(expectedValues), len(roleEnum.Values))
}
for i, expected := range expectedValues {
if roleEnum.Values[i] != expected {
t.Errorf("Expected enum value '%s' at index %d, got '%s'", expected, i, roleEnum.Values[i])
}
}
// Verify role column in User table
userTable := schema.Tables[0]
roleCol, exists := userTable.Columns["role"]
if !exists {
t.Fatal("Expected 'role' column not found")
}
if roleCol.Type != "Role" {
t.Errorf("Expected role type 'Role', got '%s'", roleCol.Type)
}
}
func TestReader_ReadDatabase_CustomScalars(t *testing.T) {
opts := &readers.ReaderOptions{
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "custom_scalars.graphql"),
}
reader := NewReader(opts)
db, err := reader.ReadDatabase()
if err != nil {
t.Fatalf("ReadDatabase() error = %v", err)
}
schema := db.Schemas[0]
userTable := schema.Tables[0]
// Verify custom scalar mappings
expectedTypes := map[string]string{
"createdAt": "timestamp",
"metadata": "jsonb",
"birthDate": "date",
}
for colName, expectedType := range expectedTypes {
col, exists := userTable.Columns[colName]
if !exists {
t.Errorf("Expected column '%s' not found", colName)
continue
}
if col.Type != expectedType {
t.Errorf("Column '%s': expected type '%s', got '%s'", colName, expectedType, col.Type)
}
}
}
func TestReader_ReadDatabase_UUIDMetadata(t *testing.T) {
opts := &readers.ReaderOptions{
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "simple.graphql"),
Metadata: map[string]interface{}{
"idType": "uuid",
},
}
reader := NewReader(opts)
db, err := reader.ReadDatabase()
if err != nil {
t.Fatalf("ReadDatabase() error = %v", err)
}
schema := db.Schemas[0]
userTable := schema.Tables[0]
idCol, exists := userTable.Columns["id"]
if !exists {
t.Fatal("Expected 'id' column not found")
}
if idCol.Type != "uuid" {
t.Errorf("Expected id type 'uuid' with metadata, got '%s'", idCol.Type)
}
}
func TestReader_ReadDatabase_Complex(t *testing.T) {
opts := &readers.ReaderOptions{
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "complex.graphql"),
}
reader := NewReader(opts)
db, err := reader.ReadDatabase()
if err != nil {
t.Fatalf("ReadDatabase() error = %v", err)
}
schema := db.Schemas[0]
// Should have 5 tables: User, Profile, Post, Tag, and PostTag (join table)
expectedTableCount := 5
if len(schema.Tables) != expectedTableCount {
t.Fatalf("Expected %d tables, got %d", expectedTableCount, len(schema.Tables))
}
// Verify PostTag join table exists (many-to-many between Post and Tag)
var joinTable *models.Table
for _, table := range schema.Tables {
if table.Name == "PostTag" {
joinTable = table
break
}
}
if joinTable == nil {
t.Fatal("Expected PostTag join table not found")
}
// Verify join table has both FK columns
if _, exists := joinTable.Columns["postId"]; !exists {
t.Error("Expected 'postId' column in PostTag join table")
}
if _, exists := joinTable.Columns["tagId"]; !exists {
t.Error("Expected 'tagId' column in PostTag join table")
}
// Verify composite primary key
pkFound := false
for _, constraint := range joinTable.Constraints {
if constraint.Type == models.PrimaryKeyConstraint {
if len(constraint.Columns) == 2 {
pkFound = true
}
break
}
}
if !pkFound {
t.Error("Expected composite primary key in PostTag join table")
}
}
func TestReader_ReadSchema(t *testing.T) {
opts := &readers.ReaderOptions{
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "simple.graphql"),
}
reader := NewReader(opts)
schema, err := reader.ReadSchema()
if err != nil {
t.Fatalf("ReadSchema() error = %v", err)
}
if schema.Name != "public" {
t.Errorf("Expected schema name 'public', got '%s'", schema.Name)
}
if len(schema.Tables) != 1 {
t.Errorf("Expected 1 table, got %d", len(schema.Tables))
}
}
func TestReader_ReadTable(t *testing.T) {
opts := &readers.ReaderOptions{
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "simple.graphql"),
}
reader := NewReader(opts)
table, err := reader.ReadTable()
if err != nil {
t.Fatalf("ReadTable() error = %v", err)
}
if table.Name != "User" {
t.Errorf("Expected table name 'User', got '%s'", table.Name)
}
}
func TestReader_InvalidPath(t *testing.T) {
opts := &readers.ReaderOptions{
FilePath: "/nonexistent/path.graphql",
}
reader := NewReader(opts)
_, err := reader.ReadDatabase()
if err == nil {
t.Error("Expected error for invalid path, got nil")
}
}
func TestReader_EmptyPath(t *testing.T) {
opts := &readers.ReaderOptions{
FilePath: "",
}
reader := NewReader(opts)
_, err := reader.ReadDatabase()
if err == nil {
t.Error("Expected error for empty path, got nil")
}
}


@@ -0,0 +1,225 @@
package graphql
import (
"fmt"
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
)
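// detectAndCreateRelationships walks each table's stashed relation fields,
// creating FK columns for single references and join tables for bidirectional arrays.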
func (r *Reader) detectAndCreateRelationships(schema *models.Schema, ctx *parseContext) error {
// Build table lookup map
tableMap := make(map[string]*models.Table)
for _, table := range schema.Tables {
tableMap[table.Name] = table
}
// Process each table's relation fields
for _, table := range schema.Tables {
relationFields, ok := table.Metadata["relationFields"].(map[string]*fieldInfo)
if !ok || len(relationFields) == 0 {
continue
}
for fieldName, fieldInfo := range relationFields {
targetTable, exists := tableMap[fieldInfo.typeName]
if !exists {
// Referenced type doesn't exist - might be an interface/union, skip
continue
}
if fieldInfo.isArray {
// This is a one-to-many or many-to-many reverse side
// Check if target table has a reverse array field
if r.hasReverseArrayField(targetTable, table.Name) {
// Bidirectional array = many-to-many
// Only create join table once (lexicographically first table creates it)
if table.Name < targetTable.Name {
if err := r.createManyToManyJoinTable(schema, table, targetTable, fieldName, tableMap); err != nil {
return err
}
}
}
// For one-to-many, no action needed (FK is on the other table)
} else {
// This is a many-to-one or one-to-one
// Create FK column on this table
if err := r.createForeignKeyColumn(table, targetTable, fieldName, fieldInfo.isNullable, schema); err != nil {
return err
}
}
}
}
// Clean up metadata
for _, table := range schema.Tables {
delete(table.Metadata, "relationFields")
}
return nil
}
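// hasReverseArrayField reports whether table declares an array relation field pointing back at targetTypeName.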
func (r *Reader) hasReverseArrayField(table *models.Table, targetTypeName string) bool {
relationFields, ok := table.Metadata["relationFields"].(map[string]*fieldInfo)
if !ok {
return false
}
for _, fieldInfo := range relationFields {
if fieldInfo.typeName == targetTypeName && fieldInfo.isArray {
return true
}
}
return false
}
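// createForeignKeyColumn adds a {field}Id column to fromTable, plus the FK
// constraint and relationship referencing toTable's primary key.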
func (r *Reader) createForeignKeyColumn(fromTable, toTable *models.Table, fieldName string, nullable bool, schema *models.Schema) error {
// Get primary key from target table
pkCol := toTable.GetPrimaryKey()
if pkCol == nil {
return fmt.Errorf("target table %s has no primary key for relationship", toTable.Name)
}
// Create FK column name: {fieldName}Id
fkColName := fieldName + "Id"
// Check if column already exists (shouldn't happen but be safe)
if _, exists := fromTable.Columns[fkColName]; exists {
return nil
}
// Create FK column
fkCol := models.InitColumn(fkColName, fromTable.Name, schema.Name)
fkCol.Type = pkCol.Type
fkCol.NotNull = !nullable
fromTable.Columns[fkColName] = fkCol
// Create FK constraint
constraint := models.InitConstraint(
fmt.Sprintf("fk_%s_%s", fromTable.Name, fieldName),
models.ForeignKeyConstraint,
)
constraint.Schema = schema.Name
constraint.Table = fromTable.Name
constraint.Columns = []string{fkColName}
constraint.ReferencedSchema = schema.Name
constraint.ReferencedTable = toTable.Name
constraint.ReferencedColumns = []string{pkCol.Name}
constraint.OnDelete = "CASCADE"
constraint.OnUpdate = "RESTRICT"
fromTable.Constraints[constraint.Name] = constraint
// Create relationship
relationship := models.InitRelationship(
fmt.Sprintf("rel_%s_%s", fromTable.Name, fieldName),
models.OneToMany,
)
relationship.FromTable = fromTable.Name
relationship.FromSchema = schema.Name
relationship.FromColumns = []string{fkColName}
relationship.ToTable = toTable.Name
relationship.ToSchema = schema.Name
relationship.ToColumns = []string{pkCol.Name}
relationship.ForeignKey = constraint.Name
fromTable.Relationships[relationship.Name] = relationship
return nil
}
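// createManyToManyJoinTable creates a {Table1}{Table2} junction table with a
// composite primary key and foreign keys to both tables.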
func (r *Reader) createManyToManyJoinTable(schema *models.Schema, table1, table2 *models.Table, fieldName string, tableMap map[string]*models.Table) error {
// Create join table name
joinTableName := table1.Name + table2.Name
// Check if join table already exists
if _, exists := tableMap[joinTableName]; exists {
return nil
}
// Get primary keys
pk1 := table1.GetPrimaryKey()
pk2 := table2.GetPrimaryKey()
if pk1 == nil || pk2 == nil {
return fmt.Errorf("cannot create many-to-many: tables must have primary keys")
}
// Create join table
joinTable := models.InitTable(joinTableName, schema.Name)
// Create FK column for table1
fkCol1Name := strings.ToLower(table1.Name) + "Id"
fkCol1 := models.InitColumn(fkCol1Name, joinTable.Name, schema.Name)
fkCol1.Type = pk1.Type
fkCol1.NotNull = true
joinTable.Columns[fkCol1Name] = fkCol1
// Create FK column for table2
fkCol2Name := strings.ToLower(table2.Name) + "Id"
fkCol2 := models.InitColumn(fkCol2Name, joinTable.Name, schema.Name)
fkCol2.Type = pk2.Type
fkCol2.NotNull = true
joinTable.Columns[fkCol2Name] = fkCol2
// Create composite primary key
pkConstraint := models.InitConstraint(
fmt.Sprintf("pk_%s", joinTableName),
models.PrimaryKeyConstraint,
)
pkConstraint.Schema = schema.Name
pkConstraint.Table = joinTable.Name
pkConstraint.Columns = []string{fkCol1Name, fkCol2Name}
joinTable.Constraints[pkConstraint.Name] = pkConstraint
// Create FK constraint to table1
fk1 := models.InitConstraint(
fmt.Sprintf("fk_%s_%s", joinTableName, table1.Name),
models.ForeignKeyConstraint,
)
fk1.Schema = schema.Name
fk1.Table = joinTable.Name
fk1.Columns = []string{fkCol1Name}
fk1.ReferencedSchema = schema.Name
fk1.ReferencedTable = table1.Name
fk1.ReferencedColumns = []string{pk1.Name}
fk1.OnDelete = "CASCADE"
fk1.OnUpdate = "RESTRICT"
joinTable.Constraints[fk1.Name] = fk1
// Create FK constraint to table2
fk2 := models.InitConstraint(
fmt.Sprintf("fk_%s_%s", joinTableName, table2.Name),
models.ForeignKeyConstraint,
)
fk2.Schema = schema.Name
fk2.Table = joinTable.Name
fk2.Columns = []string{fkCol2Name}
fk2.ReferencedSchema = schema.Name
fk2.ReferencedTable = table2.Name
fk2.ReferencedColumns = []string{pk2.Name}
fk2.OnDelete = "CASCADE"
fk2.OnUpdate = "RESTRICT"
joinTable.Constraints[fk2.Name] = fk2
// Create relationships
rel1 := models.InitRelationship(
fmt.Sprintf("rel_%s_%s_%s", joinTableName, table1.Name, table2.Name),
models.ManyToMany,
)
rel1.FromTable = table1.Name
rel1.FromSchema = schema.Name
rel1.ToTable = table2.Name
rel1.ToSchema = schema.Name
rel1.ThroughTable = joinTableName
rel1.ThroughSchema = schema.Name
joinTable.Relationships[rel1.Name] = rel1
// Add join table to schema
schema.Tables = append(schema.Tables, joinTable)
tableMap[joinTableName] = joinTable
return nil
}


@@ -0,0 +1,97 @@
package graphql
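// isScalarType reports whether typeName is a built-in, declared, or well-known
// custom scalar, as opposed to a relation target.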
func (r *Reader) isScalarType(typeName string, ctx *parseContext) bool {
// Built-in GraphQL scalars
builtInScalars := map[string]bool{
"ID": true,
"String": true,
"Int": true,
"Float": true,
"Boolean": true,
}
if builtInScalars[typeName] {
return true
}
// Custom scalars declared in the schema
if ctx.customScalars[typeName] {
return true
}
// Common custom scalars (even if not declared)
commonCustomScalars := map[string]bool{
"DateTime": true,
"JSON": true,
"Date": true,
"Time": true,
"Upload": true,
"Decimal": true,
}
return commonCustomScalars[typeName]
}
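// graphQLTypeToSQL maps a GraphQL type to a SQL type, honoring ID and custom
// scalar overrides supplied via reader metadata.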
func (r *Reader) graphQLTypeToSQL(gqlType string, fieldName string, typeName string) string {
// Check for ID type with configurable mapping
if gqlType == "ID" {
// Check metadata for ID type preference
if r.options.Metadata != nil {
// Global idType setting
if idType, ok := r.options.Metadata["idType"].(string); ok {
if idType == "uuid" {
return "uuid"
}
}
// Per-type ID mapping
if typeIdMappings, ok := r.options.Metadata["typeIdMappings"].(map[string]string); ok {
if idType, ok := typeIdMappings[typeName]; ok {
if idType == "uuid" {
return "uuid"
}
}
}
}
return "bigint" // Default
}
// Custom scalar mappings
if r.options.Metadata != nil {
if customMappings, ok := r.options.Metadata["customScalarMappings"].(map[string]string); ok {
if sqlType, ok := customMappings[gqlType]; ok {
return sqlType
}
}
}
// Built-in custom scalar mappings
customScalars := map[string]string{
"DateTime": "timestamp",
"JSON": "jsonb",
"Date": "date",
"Time": "time",
"Decimal": "numeric",
"Upload": "bytea",
}
if sqlType, ok := customScalars[gqlType]; ok {
return sqlType
}
// Standard scalar mappings
typeMap := map[string]string{
"String": "text",
"Int": "integer",
"Float": "double precision",
"Boolean": "boolean",
}
if sqlType, ok := typeMap[gqlType]; ok {
return sqlType
}
// Not a known scalar: return the name as-is so downstream handling can
// treat it as an enum or custom type
return gqlType
}
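// Example ReaderOptions.Metadata shape consumed above (the keys are the ones
// this reader checks; the concrete values are illustrative):
//
//	Metadata: map[string]interface{}{
//	    "idType":               "uuid",                            // every ID field becomes uuid
//	    "typeIdMappings":       map[string]string{"User": "uuid"}, // per-type override
//	    "customScalarMappings": map[string]string{"Money": "numeric"},
//	}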

pkg/readers/json/README.md Normal file

@@ -0,0 +1,152 @@
# JSON Reader
Reads database schema definitions from JSON files.
## Overview
The JSON Reader parses JSON files that define database schemas in RelSpec's canonical JSON format and converts them into RelSpec's internal database model representation.
## Features
- Reads RelSpec's standard JSON schema format
- Supports complete schema representation including:
- Databases and schemas
- Tables, columns, and data types
- Constraints (PK, FK, unique, check)
- Indexes
- Relationships
- Views and sequences
## Usage
### Basic Example
```go
package main
import (
"fmt"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/readers/json"
)
func main() {
options := &readers.ReaderOptions{
FilePath: "/path/to/schema.json",
}
reader := json.NewReader(options)
db, err := reader.ReadDatabase()
if err != nil {
panic(err)
}
fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```
### CLI Example
```bash
# Read JSON schema and convert to GORM models
relspec --input json --in-file schema.json --output gorm --out-file models.go
# Convert JSON to PostgreSQL DDL
relspec --input json --in-file database.json --output pgsql --out-file schema.sql
# Transform JSON to YAML
relspec --input json --in-file schema.json --output yaml --out-file schema.yaml
```
## Example JSON Schema
```json
{
"name": "myapp",
"database_type": "postgresql",
"schemas": [
{
"name": "public",
"tables": [
{
"name": "users",
"schema": "public",
"columns": {
"id": {
"name": "id",
"type": "bigint",
"not_null": true,
"is_primary_key": true,
"auto_increment": true,
"sequence": 1
},
"username": {
"name": "username",
"type": "varchar",
"length": 50,
"not_null": true,
"sequence": 2
},
"email": {
"name": "email",
"type": "varchar",
"length": 100,
"not_null": true,
"sequence": 3
}
},
"constraints": {
"pk_users": {
"name": "pk_users",
"type": "PRIMARY KEY",
"columns": ["id"]
},
"uq_users_username": {
"name": "uq_users_username",
"type": "UNIQUE",
"columns": ["username"]
}
},
"indexes": {
"idx_users_email": {
"name": "idx_users_email",
"columns": ["email"],
"unique": false,
"type": "btree"
}
}
}
]
}
]
}
```
## Schema Structure
The JSON format follows RelSpec's internal model structure:
- `Database` - Top-level container
- `name` - Database name
- `database_type` - Database system (postgresql, mysql, etc.)
- `schemas[]` - Array of schemas
- `Schema` - Schema/namespace
- `name` - Schema name
- `tables[]` - Array of tables
- `views[]` - Array of views
- `sequences[]` - Array of sequences
- `Table` - Table definition
- `name` - Table name
- `columns{}` - Map of columns
- `constraints{}` - Map of constraints
- `indexes{}` - Map of indexes
- `relationships{}` - Map of relationships
## Notes
- This is RelSpec's native interchange format
- Preserves complete schema information
- Ideal for version control and schema documentation
- Can be used as an intermediate format for transformations
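
Because this is the native interchange format, pairing the JSON reader with any writer gives a one-step transformation in code. A minimal sketch, assuming the YAML writer lives at `pkg/writers/yaml` and follows the same constructor pattern as the writers documented elsewhere in this repository:

```go
package main

import (
	"git.warky.dev/wdevs/relspecgo/pkg/readers"
	jsonreader "git.warky.dev/wdevs/relspecgo/pkg/readers/json"
	"git.warky.dev/wdevs/relspecgo/pkg/writers"
	yamlwriter "git.warky.dev/wdevs/relspecgo/pkg/writers/yaml" // assumed path
)

func main() {
	// Read the canonical JSON schema...
	reader := jsonreader.NewReader(&readers.ReaderOptions{FilePath: "schema.json"})
	db, err := reader.ReadDatabase()
	if err != nil {
		panic(err)
	}

	// ...and write the same model back out as YAML.
	writer := yamlwriter.NewWriter(&writers.WriterOptions{OutputPath: "schema.yaml"})
	if err := writer.WriteDatabase(db); err != nil {
		panic(err)
	}
}
```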

pkg/readers/pgsql/README.md Normal file

@@ -0,0 +1,138 @@
# PostgreSQL Reader
Reads schema information directly from a live PostgreSQL database.
## Overview
The PostgreSQL Reader connects to a PostgreSQL database and introspects its schema, extracting complete information about tables, columns, constraints, indexes, views, and sequences.
## Features
- Direct database introspection
- Extracts complete schema information including:
- Tables and columns
- Primary keys, foreign keys, unique constraints, check constraints
- Indexes
- Views
- Sequences
- Supports multiple schemas
- Captures constraint actions (ON DELETE, ON UPDATE)
- Derives relationships from foreign keys
## Usage
### Basic Example
```go
package main
import (
"fmt"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/readers/pgsql"
)
func main() {
options := &readers.ReaderOptions{
ConnectionString: "postgres://user:password@localhost:5432/mydb?sslmode=disable",
}
reader := pgsql.NewReader(options)
db, err := reader.ReadDatabase()
if err != nil {
panic(err)
}
fmt.Printf("Database: %s\n", db.Name)
fmt.Printf("Schemas: %d\n", len(db.Schemas))
for _, schema := range db.Schemas {
fmt.Printf(" Schema: %s, Tables: %d\n", schema.Name, len(schema.Tables))
}
}
```
### CLI Example
```bash
# Inspect PostgreSQL database and export to JSON
relspec --input pgsql \
--conn "postgres://user:password@localhost:5432/mydb" \
--output json \
--out-file schema.json
# Generate GORM models from PostgreSQL database
relspec --input pgsql \
--conn "postgres://user:password@localhost:5432/mydb" \
--output gorm \
--out-file models.go
# Export database structure to YAML
relspec --input pgsql \
--conn "postgres://localhost/mydb?sslmode=disable" \
--output yaml \
--out-file schema.yaml
```
## Connection String Format
The reader uses PostgreSQL connection strings in the format:
```
postgres://username:password@hostname:port/database?parameters
```
Examples:
```
postgres://localhost/mydb
postgres://user:pass@localhost:5432/mydb
postgres://user@localhost/mydb?sslmode=disable
postgres://user:pass@db.example.com:5432/production?sslmode=require
```
## Extracted Information
### Tables
- Table name and schema
- Comments/descriptions
- All columns with data types, nullable, defaults
- Sequences
### Columns
- Column name, data type, length/precision
- NULL/NOT NULL constraints
- Default values
- Auto-increment information
- Primary key designation
### Constraints
- Primary keys
- Foreign keys (with ON DELETE/UPDATE actions)
- Unique constraints
- Check constraints
### Indexes
- Index name and type (btree, hash, gist, gin, etc.)
- Columns in index
- Unique/non-unique
- Partial indexes
### Views
- View definitions
- Column information
### Sequences
- Sequence properties
- Associated tables
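
The extracted constraint metadata is rich enough to walk relationships directly. For example, a sketch of a helper that prints every foreign key in an introspected database, using the `models` package fields shown throughout these docs:

```go
import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
)

// ListForeignKeys prints every foreign key constraint with its actions.
func ListForeignKeys(db *models.Database) {
	for _, schema := range db.Schemas {
		for _, table := range schema.Tables {
			for _, c := range table.Constraints {
				if c.Type == models.ForeignKeyConstraint {
					fmt.Printf("%s.%s %v -> %s.%s %v (on delete %s, on update %s)\n",
						schema.Name, table.Name, c.Columns,
						c.ReferencedSchema, c.ReferencedTable, c.ReferencedColumns,
						c.OnDelete, c.OnUpdate)
				}
			}
		}
	}
}
```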
## Notes
- Requires PostgreSQL connection permissions
- Reads all non-system schemas (excludes pg_catalog, information_schema, pg_toast)
- Captures PostgreSQL-specific data types
- Automatically maps PostgreSQL types to canonical types
- Preserves relationship metadata for downstream conversion
## Requirements
- Go library: `github.com/jackc/pgx/v5`
- Database user must have SELECT permissions on system catalogs


@@ -0,0 +1,103 @@
# Prisma Reader
Reads Prisma schema files and extracts database schema information.
## Overview
The Prisma Reader parses `.prisma` schema files that define database models using Prisma's schema language and converts them into RelSpec's internal database model representation.
## Features
- Parses Prisma schema syntax
- Extracts models, fields, and relationships
- Supports Prisma attributes and directives
- Handles enums and composite types
## Usage
### Basic Example
```go
package main
import (
"fmt"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/readers/prisma"
)
func main() {
options := &readers.ReaderOptions{
FilePath: "/path/to/schema.prisma",
}
reader := prisma.NewReader(options)
db, err := reader.ReadDatabase()
if err != nil {
panic(err)
}
fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```
### CLI Example
```bash
# Read Prisma schema and convert to JSON
relspec --input prisma --in-file schema.prisma --output json --out-file schema.json
# Convert Prisma to GORM models
relspec --input prisma --in-file schema.prisma --output gorm --out-file models.go
```
## Example Prisma Schema
```prisma
datasource db {
provider = "postgresql"
url = env("DATABASE_URL")
}
generator client {
provider = "prisma-client-js"
}
model User {
id Int @id @default(autoincrement())
username String @unique @db.VarChar(50)
email String @db.VarChar(100)
createdAt DateTime @default(now()) @map("created_at")
posts Post[]
@@map("users")
}
model Post {
id Int @id @default(autoincrement())
userId Int @map("user_id")
title String @db.VarChar(200)
content String @db.Text
user User @relation(fields: [userId], references: [id], onDelete: Cascade)
@@map("posts")
}
```
## Supported Prisma Attributes
- `@id` - Primary key
- `@unique` - Unique constraint
- `@default` - Default value
- `@map` - Column name mapping
- `@@map` - Table name mapping
- `@relation` - Relationship definition
- `@db.*` - Database-specific type annotations
## Notes
- Extracts datasource provider information
- Supports `@@map` for custom table names
- Handles Prisma-specific types and converts them to standard SQL types
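
To check how Prisma types land in the canonical model (for example, that `DateTime` converts to a SQL timestamp type), you can dump the columns after reading. A quick sketch using the `models` package fields (helper name is illustrative):

```go
import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
)

// DumpColumns prints each column with its converted SQL type.
func DumpColumns(db *models.Database) {
	for _, schema := range db.Schemas {
		for _, table := range schema.Tables {
			for _, col := range table.Columns {
				fmt.Printf("%s.%s.%s: %s\n", schema.Name, table.Name, col.Name, col.Type)
			}
		}
	}
}
```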


@@ -0,0 +1,122 @@
# TypeORM Reader
Reads TypeScript files containing TypeORM entity definitions and extracts database schema information.
## Overview
The TypeORM Reader parses TypeScript source files that define TypeORM entities (classes with TypeORM decorators) and converts them into RelSpec's internal database model representation.
## Features
- Parses TypeORM decorators and entity definitions
- Extracts table, column, and relationship information
- Supports various TypeORM column types and options
- Handles constraints, indexes, and relationships
## Usage
### Basic Example
```go
package main
import (
"fmt"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/readers/typeorm"
)
func main() {
options := &readers.ReaderOptions{
FilePath: "/path/to/entities/",
}
reader := typeorm.NewReader(options)
db, err := reader.ReadDatabase()
if err != nil {
panic(err)
}
fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```
### CLI Example
```bash
# Read TypeORM entities and convert to JSON
relspec --input typeorm --in-file entities/ --output json --out-file schema.json
# Convert TypeORM to GORM models
relspec --input typeorm --in-file User.ts --output gorm --out-file models.go
```
## Example TypeORM Entity
```typescript
import {
Entity,
PrimaryGeneratedColumn,
Column,
CreateDateColumn,
OneToMany,
ManyToOne,
JoinColumn,
} from 'typeorm';
import { Post } from './Post';
@Entity('users')
export class User {
@PrimaryGeneratedColumn('increment')
id: number;
@Column({ type: 'varchar', length: 50, unique: true })
username: string;
@Column({ type: 'varchar', length: 100 })
email: string;
@CreateDateColumn({ name: 'created_at' })
createdAt: Date;
@OneToMany(() => Post, (post) => post.user)
posts: Post[];
}
@Entity('posts')
export class Post {
@PrimaryGeneratedColumn('increment')
id: number;
@Column({ name: 'user_id' })
userId: number;
@Column({ type: 'varchar', length: 200 })
title: string;
@Column({ type: 'text' })
content: string;
@ManyToOne(() => User, (user) => user.posts, { onDelete: 'CASCADE' })
@JoinColumn({ name: 'user_id' })
user: User;
}
```
## Supported TypeORM Decorators
- `@Entity()` - Entity/table definition
- `@PrimaryGeneratedColumn()` - Auto-increment primary key
- `@PrimaryColumn()` - Primary key
- `@Column()` - Column definition
- `@CreateDateColumn()` - Auto-set creation timestamp
- `@UpdateDateColumn()` - Auto-update timestamp
- `@OneToMany()` - One-to-many relationship
- `@ManyToOne()` - Many-to-one relationship
- `@JoinColumn()` - Foreign key column
- `@Index()` - Index definition
- `@Unique()` - Unique constraint
## Notes
- Schema name can be specified in `@Entity()` decorator
- Supports both JavaScript and TypeScript entity files
- Relationship metadata is extracted from decorators

pkg/readers/yaml/README.md Normal file

@@ -0,0 +1,159 @@
# YAML Reader
Reads database schema definitions from YAML files.
## Overview
The YAML Reader parses YAML files that define database schemas in RelSpec's canonical YAML format and converts them into RelSpec's internal database model representation.
## Features
- Reads RelSpec's standard YAML schema format
- Human-readable alternative to JSON format
- Supports complete schema representation including:
- Databases and schemas
- Tables, columns, and data types
- Constraints (PK, FK, unique, check)
- Indexes
- Relationships
- Views and sequences
## Usage
### Basic Example
```go
package main
import (
"fmt"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/readers/yaml"
)
func main() {
options := &readers.ReaderOptions{
FilePath: "/path/to/schema.yaml",
}
reader := yaml.NewReader(options)
db, err := reader.ReadDatabase()
if err != nil {
panic(err)
}
fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```
### CLI Example
```bash
# Read YAML schema and convert to GORM models
relspec --input yaml --in-file schema.yaml --output gorm --out-file models.go
# Convert YAML to PostgreSQL DDL
relspec --input yaml --in-file database.yaml --output pgsql --out-file schema.sql
# Transform YAML to JSON
relspec --input yaml --in-file schema.yaml --output json --out-file schema.json
```
## Example YAML Schema
```yaml
name: myapp
database_type: postgresql
schemas:
- name: public
tables:
- name: users
schema: public
columns:
id:
name: id
type: bigint
not_null: true
is_primary_key: true
auto_increment: true
sequence: 1
username:
name: username
type: varchar
length: 50
not_null: true
sequence: 2
email:
name: email
type: varchar
length: 100
not_null: true
sequence: 3
constraints:
pk_users:
name: pk_users
type: PRIMARY KEY
columns:
- id
uq_users_username:
name: uq_users_username
type: UNIQUE
columns:
- username
indexes:
idx_users_email:
name: idx_users_email
columns:
- email
unique: false
type: btree
- name: posts
schema: public
columns:
id:
name: id
type: bigint
not_null: true
is_primary_key: true
sequence: 1
user_id:
name: user_id
type: bigint
not_null: true
sequence: 2
title:
name: title
type: varchar
length: 200
not_null: true
sequence: 3
constraints:
fk_posts_user_id:
name: fk_posts_user_id
type: FOREIGN KEY
columns:
- user_id
referenced_table: users
referenced_schema: public
referenced_columns:
- id
on_delete: CASCADE
on_update: NO ACTION
```
## Schema Structure
The YAML format mirrors RelSpec's internal model structure with human-readable syntax:
- Database level: `name`, `database_type`, `schemas`
- Schema level: `name`, `tables`, `views`, `sequences`
- Table level: `name`, `schema`, `columns`, `constraints`, `indexes`, `relationships`
- Column level: `name`, `type`, `length`, `not_null`, `default`, etc.
## Notes
- YAML format is more human-readable than JSON
- Ideal for manual editing and version control
- Comments are supported in YAML
- Preserves complete schema information
- Can be used for configuration and documentation

pkg/writers/bun/README.md Normal file

@@ -0,0 +1,129 @@
# Bun Writer
Generates Go source files with Bun model definitions from database schema information.
## Overview
The Bun Writer converts RelSpec's internal database model representation into Go source code with Bun struct definitions, complete with proper tags, relationships, and table configuration.
## Features
- Generates Bun-compatible Go structs
- Creates proper `bun` struct tags
- Adds relationship fields
- Supports both single-file and multi-file output
- Maps SQL types to Go types
- Handles nullable fields with sql.Null* types
- Generates table aliases
## Usage
### Basic Example
```go
package main
import (
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
"git.warky.dev/wdevs/relspecgo/pkg/writers/bun"
)
func main() {
options := &writers.WriterOptions{
OutputPath: "models.go",
PackageName: "models",
}
writer := bun.NewWriter(options)
err := writer.WriteDatabase(db)
if err != nil {
panic(err)
}
}
```
### CLI Examples
```bash
# Generate Bun models from PostgreSQL database
relspec --input pgsql \
--conn "postgres://localhost/mydb" \
--output bun \
--out-file models.go \
--package models
# Convert GORM models to Bun
relspec --input gorm --in-file gorm_models.go --output bun --out-file bun_models.go
# Multi-file output
relspec --input json --in-file schema.json --output bun --out-file models/
```
## Generated Code Example
```go
package models
import (
"time"
"database/sql"
"github.com/uptrace/bun"
)
type User struct {
bun.BaseModel `bun:"table:users,alias:u"`
ID int64 `bun:"id,pk,autoincrement" json:"id"`
Username string `bun:"username,notnull,unique" json:"username"`
Email string `bun:"email,notnull" json:"email"`
Bio sql.NullString `bun:"bio" json:"bio,omitempty"`
CreatedAt time.Time `bun:"created_at,notnull,default:now()" json:"created_at"`
// Relationships
Posts []*Post `bun:"rel:has-many,join:id=user_id" json:"posts,omitempty"`
}
type Post struct {
bun.BaseModel `bun:"table:posts,alias:p"`
ID int64 `bun:"id,pk" json:"id"`
UserID int64 `bun:"user_id,notnull" json:"user_id"`
Title string `bun:"title,notnull" json:"title"`
Content sql.NullString `bun:"content" json:"content,omitempty"`
// Belongs to
User *User `bun:"rel:belongs-to,join:user_id=id" json:"user,omitempty"`
}
```
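The generated structs plug directly into Bun's query API. A minimal usage sketch with the `User` model above (the driver choice and DSN are assumptions):

```go
package main

import (
	"context"
	"database/sql"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect/pgdialect"
	"github.com/uptrace/bun/driver/pgdriver"
)

func main() {
	// Open a PostgreSQL connection and wrap it with Bun.
	sqldb := sql.OpenDB(pgdriver.NewConnector(
		pgdriver.WithDSN("postgres://user:pass@localhost:5432/mydb?sslmode=disable")))
	db := bun.NewDB(sqldb, pgdialect.New())

	// Query users together with their posts via the generated relation field.
	var users []User
	if err := db.NewSelect().Model(&users).Relation("Posts").Scan(context.Background()); err != nil {
		panic(err)
	}
}
```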
## Supported Bun Tags
- `table` - Table name and alias
- `column` - Column name (auto-derived if not specified)
- `pk` - Primary key
- `autoincrement` - Auto-increment
- `notnull` - NOT NULL constraint
- `unique` - Unique constraint
- `default` - Default value
- `rel` - Relationship definition
- `type` - Explicit SQL type
## Type Mapping
| SQL Type | Go Type | Nullable Type |
|----------|---------|---------------|
| bigint | int64 | sql.NullInt64 |
| integer | int | sql.NullInt32 |
| varchar, text | string | sql.NullString |
| boolean | bool | sql.NullBool |
| timestamp | time.Time | sql.NullTime |
| numeric | float64 | sql.NullFloat64 |
## Notes
- Model names are derived from table names (singularized, PascalCase)
- Table aliases are auto-generated from table names
- Multi-file mode: one file per table named `sql_{schema}_{table}.go`
- Generated code is auto-formatted
- JSON tags are automatically added

pkg/writers/dbml/README.md Normal file

@@ -0,0 +1,161 @@
# DBML Writer
Generates Database Markup Language (DBML) files from database schema information.
## Overview
The DBML Writer converts RelSpec's internal database model representation into DBML syntax, suitable for use with dbdiagram.io and other DBML-compatible tools.
## Features
- Generates DBML syntax
- Creates table definitions with columns
- Defines relationships
- Includes indexes
- Adds notes and documentation
- Supports enums
## Usage
### Basic Example
```go
package main
import (
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
"git.warky.dev/wdevs/relspecgo/pkg/writers/dbml"
)
func main() {
options := &writers.WriterOptions{
OutputPath: "schema.dbml",
}
writer := dbml.NewWriter(options)
err := writer.WriteDatabase(db)
if err != nil {
panic(err)
}
}
```
### CLI Examples
```bash
# Generate DBML from PostgreSQL database
relspec --input pgsql \
--conn "postgres://localhost/mydb" \
--output dbml \
--out-file schema.dbml
# Convert GORM models to DBML
relspec --input gorm --in-file models.go --output dbml --out-file database.dbml
# Convert JSON to DBML for visualization
relspec --input json --in-file schema.json --output dbml --out-file diagram.dbml
```
## Generated DBML Example
```dbml
Project MyDatabase {
database_type: 'PostgreSQL'
}
Table users {
id bigserial [pk, increment]
username varchar(50) [not null, unique]
email varchar(100) [not null]
bio text [null]
created_at timestamp [not null, default: `now()`]
Note: 'Users table'
indexes {
email [name: 'idx_users_email']
}
}
Table posts {
id bigserial [pk, increment]
user_id bigint [not null]
title varchar(200) [not null]
content text [null]
created_at timestamp [default: `now()`]
indexes {
user_id [name: 'idx_posts_user_id']
(user_id, created_at) [name: 'idx_posts_user_created']
}
}
Ref: posts.user_id > users.id [delete: cascade, update: no action]
```
## DBML Features
### Table Definitions
```dbml
Table table_name {
column_name type [attributes]
}
```
### Column Attributes
- `pk` - Primary key
- `increment` - Auto-increment
- `not null` - NOT NULL constraint
- `null` - Nullable (explicit)
- `unique` - Unique constraint
- `default: value` - Default value
- `note: 'text'` - Column note
### Relationships
```dbml
Ref: table1.column > table2.column
Ref: table1.column < table2.column
Ref: table1.column - table2.column
```
Relationship types:
- `>` - Many-to-one
- `<` - One-to-many
- `-` - One-to-one
Relationship actions:
```dbml
Ref: posts.user_id > users.id [delete: cascade, update: restrict]
```
### Indexes
```dbml
indexes {
column_name
(column1, column2) [name: 'idx_name', unique]
}
```
## Type Mapping
| SQL Type | DBML Type |
|----------|-----------|
| bigint | bigint |
| integer | int |
| varchar(n) | varchar(n) |
| text | text |
| boolean | boolean |
| timestamp | timestamp |
| date | date |
| json | json |
| uuid | uuid |
## Notes
- DBML is designed for database visualization
- Can be imported into dbdiagram.io
- Human-readable format
- Schema names can be included in table names
- Comments and notes are preserved
- Ideal for documentation and sharing designs

pkg/writers/dctx/README.md Normal file

@@ -0,0 +1,111 @@
# DCTX Writer
Generates Clarion database dictionary (DCTX) files from database schema information.
## Overview
The DCTX Writer converts RelSpec's internal database model representation into Clarion dictionary XML format, used by the Clarion development platform.
## Features
- Generates DCTX XML format
- Creates file (table) definitions
- Defines fields (columns) with Clarion types
- Includes keys (indexes)
- Handles relationships
## Usage
### Basic Example
```go
package main
import (
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
"git.warky.dev/wdevs/relspecgo/pkg/writers/dctx"
)
func main() {
options := &writers.WriterOptions{
OutputPath: "database.dctx",
}
writer := dctx.NewWriter(options)
err := writer.WriteDatabase(db)
if err != nil {
panic(err)
}
}
```
### CLI Examples
```bash
# Generate DCTX from PostgreSQL database (for Clarion migration)
relspec --input pgsql \
--conn "postgres://localhost/mydb" \
--output dctx \
--out-file app.dctx
# Convert GORM models to DCTX
relspec --input gorm --in-file models.go --output dctx --out-file legacy.dctx
# Convert JSON schema to DCTX
relspec --input json --in-file schema.json --output dctx --out-file database.dctx
```
## Type Mapping
Converts standard SQL types to Clarion types:
| SQL Type | Clarion Type | Notes |
|----------|--------------|-------|
| VARCHAR(n) | STRING(n) | Fixed-length string |
| TEXT | STRING | Variable length |
| INTEGER | LONG | 32-bit integer |
| BIGINT | DECIMAL(20,0) | Large integer |
| SMALLINT | SHORT | 16-bit integer |
| NUMERIC(p,s) | DECIMAL(p,s) | Decimal number |
| REAL, FLOAT | REAL | Floating point |
| BOOLEAN | BYTE | 0/1 value |
| DATE | DATE | Date field |
| TIME | TIME | Time field |
| TIMESTAMP | LONG | Unix timestamp |
## DCTX Structure
DCTX files are XML-based with this structure:
```xml
<?xml version="1.0"?>
<dictionary>
<file name="USERS" driver="TOPSPEED">
<record>
<field name="ID" type="LONG" />
<field name="USERNAME" type="STRING" bytes="50" />
<field name="EMAIL" type="STRING" bytes="100" />
</record>
<key name="KEY_PRIMARY" primary="true">
<field name="ID" />
</key>
</file>
</dictionary>
```
## DCTX Features
- File definitions (equivalent to tables)
- Field definitions with Clarion-specific types
- Key definitions (primary and foreign)
- Relationships between files
- Driver specifications (TOPSPEED, SQL, etc.)
## Notes
- DCTX is specific to Clarion development
- Useful for legacy system integration
- Field names are typically uppercase in Clarion
- Supports Clarion-specific attributes
- Can be imported into Clarion IDE


@@ -0,0 +1,182 @@
# DrawDB Writer
Generates DrawDB-compatible JSON files from database schema information.
## Overview
The DrawDB Writer converts RelSpec's internal database model representation into JSON format compatible with DrawDB, a free online database design tool.
## Features
- Generates DrawDB JSON format
- Creates table and field definitions
- Defines relationships
- Includes visual layout information
- Preserves constraints and indexes
## Usage
### Basic Example
```go
package main
import (
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
"git.warky.dev/wdevs/relspecgo/pkg/writers/drawdb"
)
func main() {
options := &writers.WriterOptions{
OutputPath: "diagram.json",
}
writer := drawdb.NewWriter(options)
err := writer.WriteDatabase(db)
if err != nil {
panic(err)
}
}
```
### CLI Examples
```bash
# Generate DrawDB diagram from PostgreSQL database
relspec --input pgsql \
--conn "postgres://localhost/mydb" \
--output drawdb \
--out-file diagram.json
# Convert GORM models to DrawDB for visualization
relspec --input gorm --in-file models.go --output drawdb --out-file design.json
# Convert JSON schema to DrawDB
relspec --input json --in-file schema.json --output drawdb --out-file diagram.json
```
## Generated JSON Example
```json
{
"version": "1.0",
"database": "PostgreSQL",
"tables": [
{
"id": "1",
"name": "users",
"x": 100,
"y": 100,
"fields": [
{
"id": "1",
"name": "id",
"type": "BIGINT",
"primary": true,
"autoIncrement": true,
"notNull": true
},
{
"id": "2",
"name": "username",
"type": "VARCHAR",
"size": 50,
"notNull": true,
"unique": true
},
{
"id": "3",
"name": "email",
"type": "VARCHAR",
"size": 100,
"notNull": true
}
],
"indexes": [
{
"name": "idx_users_email",
"fields": ["email"]
}
]
},
{
"id": "2",
"name": "posts",
"x": 400,
"y": 100,
"fields": [
{
"id": "1",
"name": "id",
"type": "BIGINT",
"primary": true
},
{
"id": "2",
"name": "user_id",
"type": "BIGINT",
"notNull": true
},
{
"id": "3",
"name": "title",
"type": "VARCHAR",
"size": 200,
"notNull": true
}
]
}
],
"relationships": [
{
"id": "1",
"source": "2",
"target": "1",
"sourceField": "user_id",
"targetField": "id",
"type": "many-to-one",
"onDelete": "CASCADE"
}
]
}
```
## DrawDB Features
### Table Properties
- `id` - Unique table identifier
- `name` - Table name
- `x`, `y` - Position in diagram
- `fields` - Array of field definitions
- `indexes` - Array of index definitions
### Field Properties
- `id` - Unique field identifier
- `name` - Field name
- `type` - Data type (BIGINT, VARCHAR, etc.)
- `size` - Length for string types
- `primary` - Primary key flag
- `notNull` - NOT NULL constraint
- `unique` - Unique constraint
- `autoIncrement` - Auto-increment flag
- `default` - Default value
### Relationship Properties
- `id` - Unique relationship identifier
- `source` - Source table ID
- `target` - Target table ID
- `sourceField` - Foreign key field
- `targetField` - Referenced field
- `type` - Relationship type (one-to-one, one-to-many, many-to-one)
- `onDelete` - Delete action
- `onUpdate` - Update action
## Notes
- DrawDB is available at drawdb.vercel.app
- Generated files can be imported for visual editing
- Visual positions (x, y) are auto-generated
- Ideal for creating ERD diagrams
- Supports modern database features
- Free and open-source tool


@@ -0,0 +1,120 @@
# Drizzle Writer
Generates TypeScript/JavaScript files with Drizzle ORM schema definitions from database schema information.
## Overview
The Drizzle Writer converts RelSpec's internal database model representation into TypeScript source code with Drizzle ORM schema definitions, including tables, columns, relationships, and constraints.
## Features
- Generates Drizzle-compatible TypeScript schema
- Supports PostgreSQL and MySQL schemas
- Creates table definitions with proper column types
- Generates relationship definitions
- Handles constraints and indexes
- Outputs formatted TypeScript code
## Usage
### Basic Example
```go
package main
import (
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
"git.warky.dev/wdevs/relspecgo/pkg/writers/drizzle"
)
func main() {
options := &writers.WriterOptions{
OutputPath: "schema.ts",
Metadata: map[string]interface{}{
"database_type": "postgresql", // or "mysql"
},
}
writer := drizzle.NewWriter(options)
err := writer.WriteDatabase(db)
if err != nil {
panic(err)
}
}
```
### CLI Examples
```bash
# Generate Drizzle schema from PostgreSQL database
relspec --input pgsql \
--conn "postgres://localhost/mydb" \
--output drizzle \
--out-file schema.ts
# Convert GORM models to Drizzle
relspec --input gorm --in-file models.go --output drizzle --out-file schema.ts
# Convert JSON schema to Drizzle
relspec --input json --in-file schema.json --output drizzle --out-file db/schema.ts
```
## Generated Code Example
```typescript
import { pgTable, serial, varchar, text, timestamp, integer } from 'drizzle-orm/pg-core';
import { relations } from 'drizzle-orm';
export const users = pgTable('users', {
id: serial('id').primaryKey(),
username: varchar('username', { length: 50 }).notNull().unique(),
email: varchar('email', { length: 100 }).notNull(),
bio: text('bio'),
createdAt: timestamp('created_at').notNull().defaultNow(),
});
export const posts = pgTable('posts', {
id: serial('id').primaryKey(),
userId: integer('user_id').notNull().references(() => users.id, { onDelete: 'cascade' }),
title: varchar('title', { length: 200 }).notNull(),
content: text('content'),
});
export const usersRelations = relations(users, ({ many }) => ({
posts: many(posts),
}));
export const postsRelations = relations(posts, ({ one }) => ({
user: one(users, {
fields: [posts.userId],
references: [users.id],
}),
}));
```
## Supported Column Types
### PostgreSQL
- `serial`, `bigserial` - Auto-increment integers
- `integer`, `bigint`, `smallint` - Integer types
- `varchar`, `text` - String types
- `boolean` - Boolean
- `timestamp`, `date`, `time` - Date/time types
- `json`, `jsonb` - JSON types
- `uuid` - UUID type
### MySQL
- `int`, `bigint`, `smallint` - Integer types
- `varchar`, `text` - String types
- `boolean` - Boolean
- `datetime`, `timestamp` - Date/time types
- `json` - JSON type
## Notes
- Table names and column names are preserved as-is
- Relationships are generated as separate relation definitions
- Constraint actions (CASCADE, etc.) are included in references
- Schema names other than 'public' are supported
- Output is formatted TypeScript code
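
Multi-file output is also supported: point the writer at a directory (or set the `multi_file` metadata key) and each table is written to its own `.ts` file, with enums collected in `enums.ts`. A minimal sketch:

```go
options := &writers.WriterOptions{
	OutputPath: "db/schema/", // a trailing slash or existing directory triggers multi-file mode
	Metadata: map[string]interface{}{
		"multi_file": true, // explicit override; otherwise inferred from the path
	},
}
writer := drizzle.NewWriter(options)
if err := writer.WriteDatabase(db); err != nil {
	panic(err)
}
```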


@@ -0,0 +1,221 @@
package drizzle
import (
"sort"
"git.warky.dev/wdevs/relspecgo/pkg/models"
)
// TemplateData represents the data passed to the template for code generation
type TemplateData struct {
Imports []string
Enums []*EnumData
Tables []*TableData
}
// EnumData represents an enum in the schema
type EnumData struct {
Name string // Enum name (PascalCase)
VarName string // Variable name for the enum (camelCase)
Values []string // Enum values
ValuesStr string // Comma-separated quoted values for pgEnum()
TypeUnion string // TypeScript union type (e.g., "'admin' | 'user' | 'guest'")
SchemaName string // Schema name
}
// TableData represents a table in the template
type TableData struct {
Name string // Table variable name (camelCase, e.g., users)
TableName string // Actual database table name (e.g., users)
TypeName string // TypeScript type name (PascalCase, e.g., Users)
Columns []*ColumnData // Column definitions
Indexes []*IndexData // Index definitions
Comment string // Table comment
SchemaName string // Schema name
NeedsSQLTag bool // Whether we need to import 'sql' from drizzle-orm
IndexColumnFields []string // Column field names used in indexes (for destructuring)
}
// ColumnData represents a column in a table
type ColumnData struct {
Name string // Column name in database
FieldName string // Field name in TypeScript (camelCase)
DrizzleChain string // Complete Drizzle column chain (e.g., "integer('id').primaryKey()")
TypeScriptType string // TypeScript type for interface (e.g., "string", "number | null")
IsForeignKey bool // Whether this is a foreign key
ReferencesLine string // The .references() line if FK
Comment string // Column comment
}
// IndexData represents an index definition
type IndexData struct {
Name string // Index name
Columns []string // Column names
IsUnique bool // Whether it's a unique index
Definition string // Complete index definition line
}
// NewTemplateData creates a new TemplateData
func NewTemplateData() *TemplateData {
return &TemplateData{
Imports: make([]string, 0),
Enums: make([]*EnumData, 0),
Tables: make([]*TableData, 0),
}
}
// AddImport adds an import to the template data (deduplicates automatically)
func (td *TemplateData) AddImport(importLine string) {
// Check if already exists
for _, imp := range td.Imports {
if imp == importLine {
return
}
}
td.Imports = append(td.Imports, importLine)
}
// AddEnum adds an enum to the template data
func (td *TemplateData) AddEnum(enum *EnumData) {
td.Enums = append(td.Enums, enum)
}
// AddTable adds a table to the template data
func (td *TemplateData) AddTable(table *TableData) {
td.Tables = append(td.Tables, table)
}
// FinalizeImports sorts imports
func (td *TemplateData) FinalizeImports() {
sort.Strings(td.Imports)
}
// NewEnumData creates EnumData from a models.Enum
func NewEnumData(enum *models.Enum, tm *TypeMapper) *EnumData {
// Keep enum name as-is (it should already be PascalCase from the source)
enumName := enum.Name
// Variable name is camelCase version
varName := tm.ToCamelCase(enum.Name)
// Format values as comma-separated quoted strings for pgEnum()
quotedValues := make([]string, len(enum.Values))
for i, v := range enum.Values {
quotedValues[i] = "'" + v + "'"
}
valuesStr := ""
for i, qv := range quotedValues {
if i > 0 {
valuesStr += ", "
}
valuesStr += qv
}
// Build TypeScript union type (e.g., "'admin' | 'user' | 'guest'")
typeUnion := ""
for i, qv := range quotedValues {
if i > 0 {
typeUnion += " | "
}
typeUnion += qv
}
return &EnumData{
Name: enumName,
VarName: varName,
Values: enum.Values,
ValuesStr: valuesStr,
TypeUnion: typeUnion,
SchemaName: enum.Schema,
}
}
// NewTableData creates TableData from a models.Table
func NewTableData(table *models.Table, tm *TypeMapper) *TableData {
tableName := tm.ToCamelCase(table.Name)
typeName := tm.ToPascalCase(table.Name)
return &TableData{
Name: tableName,
TableName: table.Name,
TypeName: typeName,
Columns: make([]*ColumnData, 0),
Indexes: make([]*IndexData, 0),
Comment: formatComment(table.Description, table.Comment),
SchemaName: table.Schema,
}
}
// AddColumn adds a column to the table data
func (td *TableData) AddColumn(col *ColumnData) {
td.Columns = append(td.Columns, col)
}
// AddIndex adds an index to the table data
func (td *TableData) AddIndex(idx *IndexData) {
td.Indexes = append(td.Indexes, idx)
}
// NewColumnData creates ColumnData from a models.Column
func NewColumnData(col *models.Column, table *models.Table, tm *TypeMapper, isEnum bool) *ColumnData {
fieldName := tm.ToCamelCase(col.Name)
drizzleChain := tm.BuildColumnChain(col, table, isEnum)
return &ColumnData{
Name: col.Name,
FieldName: fieldName,
DrizzleChain: drizzleChain,
Comment: formatComment(col.Description, col.Comment),
}
}
// NewIndexData creates IndexData from a models.Index
func NewIndexData(index *models.Index, tableVar string, tm *TypeMapper) *IndexData {
indexName := tm.ToCamelCase(index.Name) + "Idx"
// Build column references as field names (will be used with destructuring)
colRefs := make([]string, len(index.Columns))
for i, colName := range index.Columns {
// Use just the field name for destructured parameters
colRefs[i] = tm.ToCamelCase(colName)
}
// Build the complete definition
// Example: index('email_idx').on(email)
// or: uniqueIndex('unique_email_idx').on(email)
definition := ""
if index.Unique {
definition = "uniqueIndex('" + index.Name + "').on(" + joinStrings(colRefs, ", ") + ")"
} else {
definition = "index('" + index.Name + "').on(" + joinStrings(colRefs, ", ") + ")"
}
return &IndexData{
Name: indexName,
Columns: index.Columns,
IsUnique: index.Unique,
Definition: definition,
}
}
// formatComment combines description and comment into a single comment string
func formatComment(description, comment string) string {
if description != "" && comment != "" {
return description + " - " + comment
}
if description != "" {
return description
}
return comment
}
// joinStrings joins a slice of strings with a separator
func joinStrings(strs []string, sep string) string {
result := ""
for i, s := range strs {
if i > 0 {
result += sep
}
result += s
}
return result
}


@@ -0,0 +1,64 @@
package drizzle
import (
"bytes"
"text/template"
)
// schemaTemplate defines the template for generating Drizzle schemas
const schemaTemplate = `// Code generated by relspecgo. DO NOT EDIT.
{{range .Imports}}{{.}}
{{end}}
{{if .Enums}}
// Enums
{{range .Enums}}export const {{.VarName}} = pgEnum('{{.Name}}', [{{.ValuesStr}}]);
export type {{.Name}} = {{.TypeUnion}};
{{end}}
{{end}}
{{range .Tables}}// Table: {{.TableName}}{{if .Comment}} - {{.Comment}}{{end}}
export interface {{.TypeName}} {
{{- range $i, $col := .Columns}}
{{$col.FieldName}}: {{$col.TypeScriptType}};{{if $col.Comment}} // {{$col.Comment}}{{end}}
{{- end}}
}
export const {{.Name}} = pgTable('{{.TableName}}', {
{{- range $i, $col := .Columns}}
{{$col.FieldName}}: {{$col.DrizzleChain}},{{if $col.Comment}} // {{$col.Comment}}{{end}}
{{- end}}
}{{if .Indexes}}{{if .IndexColumnFields}}, ({ {{range $i, $field := .IndexColumnFields}}{{if $i}}, {{end}}{{$field}}{{end}} }) => [{{else}}, (table) => [{{end}}
{{- range $i, $idx := .Indexes}}
{{$idx.Definition}},
{{- end}}
]{{end}});
export type New{{.TypeName}} = typeof {{.Name}}.$inferInsert;
{{end}}`
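// For a "users" table with a non-unique "email" index, the rendered output
// (after the imports and table-comment lines) looks roughly like this;
// values are illustrative:
//
//	export interface Users {
//	    id: number;
//	    email: string;
//	}
//	export const users = pgTable('users', {
//	    id: integer('id').primaryKey().generatedAlwaysAsIdentity(),
//	    email: varchar('email').notNull(),
//	}, ({ email }) => [
//	    index('idx_users_email').on(email),
//	]);
//	export type NewUsers = typeof users.$inferInsert;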
// Templates holds the parsed templates
type Templates struct {
schemaTmpl *template.Template
}
// NewTemplates creates and parses the templates
func NewTemplates() (*Templates, error) {
schemaTmpl, err := template.New("schema").Parse(schemaTemplate)
if err != nil {
return nil, err
}
return &Templates{
schemaTmpl: schemaTmpl,
}, nil
}
// GenerateCode executes the template with the given data
func (t *Templates) GenerateCode(data *TemplateData) (string, error) {
var buf bytes.Buffer
err := t.schemaTmpl.Execute(&buf, data)
if err != nil {
return "", err
}
return buf.String(), nil
}


@@ -0,0 +1,318 @@
package drizzle
import (
"fmt"
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
)
// TypeMapper handles SQL to Drizzle type conversions
type TypeMapper struct{}
// NewTypeMapper creates a new TypeMapper instance
func NewTypeMapper() *TypeMapper {
return &TypeMapper{}
}
// SQLTypeToDrizzle converts SQL types to Drizzle column type functions
// Returns the Drizzle column constructor (e.g., "integer", "varchar", "text")
func (tm *TypeMapper) SQLTypeToDrizzle(sqlType string) string {
sqlTypeLower := strings.ToLower(sqlType)
// PostgreSQL type mapping to Drizzle
typeMap := map[string]string{
// Integer types
"integer": "integer",
"int": "integer",
"int4": "integer",
"smallint": "smallint",
"int2": "smallint",
"bigint": "bigint",
"int8": "bigint",
// Serial types
"serial": "serial",
"serial4": "serial",
"smallserial": "smallserial",
"serial2": "smallserial",
"bigserial": "bigserial",
"serial8": "bigserial",
// Numeric types
"numeric": "numeric",
"decimal": "numeric",
"real": "real",
"float4": "real",
"double precision": "doublePrecision",
"float": "doublePrecision",
"float8": "doublePrecision",
// Character types
"text": "text",
"varchar": "varchar",
"character varying": "varchar",
"char": "char",
"character": "char",
// Boolean
"boolean": "boolean",
"bool": "boolean",
// Binary
"bytea": "bytea",
// JSON types
"json": "json",
"jsonb": "jsonb",
// Date/Time types
"time": "time",
"timetz": "time",
"timestamp": "timestamp",
"timestamptz": "timestamp",
"date": "date",
"interval": "interval",
// UUID
"uuid": "uuid",
// Geometric types
"point": "point",
"line": "line",
}
// Check for exact match first
if drizzleType, ok := typeMap[sqlTypeLower]; ok {
return drizzleType
}
// Check for partial matches (e.g., "varchar(255)" -> "varchar")
for sqlPattern, drizzleType := range typeMap {
if strings.HasPrefix(sqlTypeLower, sqlPattern) {
return drizzleType
}
}
// Default to text for unknown types
return "text"
}
// BuildColumnChain builds the complete column definition chain for Drizzle
// Example: integer('id').primaryKey().notNull()
func (tm *TypeMapper) BuildColumnChain(col *models.Column, table *models.Table, isEnum bool) string {
var parts []string
// Determine Drizzle column type
var drizzleType string
if isEnum {
// For enum columns, reference the generated enum helper (the camelCase
// variable exported from the pgEnum definition, matching the enum imports
// built by the writer); emitting pgEnum inline here would be invalid
drizzleType = tm.ToCamelCase(col.Type)
} else {
drizzleType = tm.SQLTypeToDrizzle(col.Type)
}
// Start with column type and name
// Note: column name is passed as first argument to the column constructor
base := fmt.Sprintf("%s('%s')", drizzleType, col.Name)
parts = append(parts, base)
// Add column modifiers in order
modifiers := tm.buildColumnModifiers(col, table)
if len(modifiers) > 0 {
parts = append(parts, modifiers...)
}
return strings.Join(parts, ".")
}
// buildColumnModifiers builds an array of method calls for column modifiers
func (tm *TypeMapper) buildColumnModifiers(col *models.Column, table *models.Table) []string {
var modifiers []string
// Primary key
if col.IsPrimaryKey {
modifiers = append(modifiers, "primaryKey()")
}
// Not null constraint
if col.NotNull && !col.IsPrimaryKey {
modifiers = append(modifiers, "notNull()")
}
// Unique constraint (check if there's a single-column unique constraint)
if tm.hasUniqueConstraint(col.Name, table) {
modifiers = append(modifiers, "unique()")
}
// Default value
if col.AutoIncrement {
// For auto-increment, use generatedAlwaysAsIdentity()
modifiers = append(modifiers, "generatedAlwaysAsIdentity()")
} else if col.Default != nil {
defaultValue := tm.formatDefaultValue(col.Default)
if defaultValue != "" {
modifiers = append(modifiers, fmt.Sprintf("default(%s)", defaultValue))
}
}
return modifiers
}
// formatDefaultValue formats a default value for Drizzle
func (tm *TypeMapper) formatDefaultValue(defaultValue any) string {
switch v := defaultValue.(type) {
case string:
if v == "now()" || v == "CURRENT_TIMESTAMP" {
return "sql`now()`"
} else if v == "gen_random_uuid()" || strings.Contains(strings.ToLower(v), "uuid") {
return "sql`gen_random_uuid()`"
} else {
// Try to parse as number first
// Check if it's a numeric string that should be a number
if isNumericString(v) {
return v
}
// String literal
return fmt.Sprintf("'%s'", strings.ReplaceAll(v, "'", "\\'"))
}
case bool:
if v {
return "true"
}
return "false"
case int, int64, int32, int16, int8:
return fmt.Sprintf("%v", v)
case float32, float64:
return fmt.Sprintf("%v", v)
default:
return fmt.Sprintf("%v", v)
}
}
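// Illustrative inputs and outputs for the switch above:
//
//	"now()"             -> sql`now()`
//	"gen_random_uuid()" -> sql`gen_random_uuid()`
//	"42"                -> 42          (numeric string, emitted bare)
//	"draft"             -> 'draft'     (quoted string literal)
//	true                -> true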
// isNumericString checks if a string looks like a plain numeric literal
func isNumericString(s string) bool {
dotSeen := false
digitSeen := false
for i, c := range s {
switch {
case i == 0 && c == '-':
// Allow a leading negative sign
case c == '.' && !dotSeen:
// Allow at most one decimal point
dotSeen = true
case c >= '0' && c <= '9':
digitSeen = true
default:
return false
}
}
// Require at least one digit (rejects "", "-", ".", "-.")
return digitSeen
}
// hasUniqueConstraint checks if a column has a unique constraint
func (tm *TypeMapper) hasUniqueConstraint(colName string, table *models.Table) bool {
for _, constraint := range table.Constraints {
if constraint.Type == models.UniqueConstraint &&
len(constraint.Columns) == 1 &&
constraint.Columns[0] == colName {
return true
}
}
return false
}
// BuildReferencesChain builds the .references() chain for foreign key columns
func (tm *TypeMapper) BuildReferencesChain(fk *models.Constraint, referencedTable string) string {
// Example: .references(() => users.id)
if len(fk.ReferencedColumns) > 0 {
// Use the referenced table variable name (camelCase)
refTableVar := tm.ToCamelCase(referencedTable)
refColumn := fk.ReferencedColumns[0]
return fmt.Sprintf("references(() => %s.%s)", refTableVar, refColumn)
}
return ""
}
// ToCamelCase converts snake_case or PascalCase to camelCase
func (tm *TypeMapper) ToCamelCase(s string) string {
if s == "" {
return s
}
// Check if it's snake_case
if strings.Contains(s, "_") {
parts := strings.Split(s, "_")
if len(parts) == 0 {
return s
}
// First part stays lowercase
result := strings.ToLower(parts[0])
// Capitalize first letter of remaining parts
for i := 1; i < len(parts); i++ {
if len(parts[i]) > 0 {
result += strings.ToUpper(parts[i][:1]) + strings.ToLower(parts[i][1:])
}
}
return result
}
// Otherwise, assume it's PascalCase - just lowercase the first letter
return strings.ToLower(s[:1]) + s[1:]
}
// ToPascalCase converts snake_case to PascalCase
func (tm *TypeMapper) ToPascalCase(s string) string {
parts := strings.Split(s, "_")
var result string
for _, part := range parts {
if len(part) > 0 {
result += strings.ToUpper(part[:1]) + strings.ToLower(part[1:])
}
}
return result
}
// DrizzleTypeToTypeScript converts Drizzle column types to TypeScript types
func (tm *TypeMapper) DrizzleTypeToTypeScript(drizzleType string, isEnum bool, enumName string) string {
if isEnum {
return enumName
}
typeMap := map[string]string{
"integer": "number",
"bigint": "number",
"smallint": "number",
"serial": "number",
"bigserial": "number",
"smallserial": "number",
"numeric": "number",
"real": "number",
"doublePrecision": "number",
"text": "string",
"varchar": "string",
"char": "string",
"boolean": "boolean",
"bytea": "Buffer",
"json": "any",
"jsonb": "any",
"timestamp": "Date",
"date": "Date",
"time": "Date",
"interval": "string",
"uuid": "string",
"point": "{ x: number; y: number }",
"line": "{ a: number; b: number; c: number }",
}
if tsType, ok := typeMap[drizzleType]; ok {
return tsType
}
// Default to any for unknown types
return "any"
}


@@ -0,0 +1,543 @@
package drizzle
import (
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
)
// Writer implements the writers.Writer interface for Drizzle ORM
type Writer struct {
options *writers.WriterOptions
typeMapper *TypeMapper
templates *Templates
}
// NewWriter creates a new Drizzle writer with the given options
func NewWriter(options *writers.WriterOptions) *Writer {
w := &Writer{
options: options,
typeMapper: NewTypeMapper(),
}
// Initialize templates
tmpl, err := NewTemplates()
if err != nil {
// Should not happen with embedded templates
panic(fmt.Sprintf("failed to initialize templates: %v", err))
}
w.templates = tmpl
return w
}
// WriteDatabase writes a complete database as Drizzle schema
func (w *Writer) WriteDatabase(db *models.Database) error {
// Check if multi-file mode is enabled
multiFile := w.shouldUseMultiFile()
if multiFile {
return w.writeMultiFile(db)
}
return w.writeSingleFile(db)
}
// WriteSchema writes a schema as Drizzle schema
func (w *Writer) WriteSchema(schema *models.Schema) error {
// Create a temporary database with just this schema
db := models.InitDatabase(schema.Name)
db.Schemas = []*models.Schema{schema}
return w.WriteDatabase(db)
}
// WriteTable writes a single table as a Drizzle schema
func (w *Writer) WriteTable(table *models.Table) error {
// Create a temporary schema and database
schema := models.InitSchema(table.Schema)
schema.Tables = []*models.Table{table}
db := models.InitDatabase(schema.Name)
db.Schemas = []*models.Schema{schema}
return w.WriteDatabase(db)
}
// writeSingleFile writes all tables to a single file
func (w *Writer) writeSingleFile(db *models.Database) error {
templateData := NewTemplateData()
// Build enum map for quick lookup
enumMap := w.buildEnumMap(db)
// Process all schemas
for _, schema := range db.Schemas {
// Add enums
for _, enum := range schema.Enums {
enumData := NewEnumData(enum, w.typeMapper)
templateData.AddEnum(enumData)
}
// Add tables
for _, table := range schema.Tables {
tableData := w.buildTableData(table, schema, db, enumMap)
templateData.AddTable(tableData)
}
}
// Add imports
w.addImports(templateData, db)
// Finalize imports
templateData.FinalizeImports()
// Generate code
code, err := w.templates.GenerateCode(templateData)
if err != nil {
return fmt.Errorf("failed to generate code: %w", err)
}
// Write output
return w.writeOutput(code)
}
// writeMultiFile writes each table to a separate file
func (w *Writer) writeMultiFile(db *models.Database) error {
// Ensure output path is a directory
if w.options.OutputPath == "" {
return fmt.Errorf("output path is required for multi-file mode")
}
// Create output directory if it doesn't exist
if err := os.MkdirAll(w.options.OutputPath, 0755); err != nil {
return fmt.Errorf("failed to create output directory: %w", err)
}
// Build enum map for quick lookup
enumMap := w.buildEnumMap(db)
// Process all schemas
for _, schema := range db.Schemas {
// Write enums file if there are any
if len(schema.Enums) > 0 {
if err := w.writeEnumsFile(schema); err != nil {
return err
}
}
// Write each table to a separate file
for _, table := range schema.Tables {
if err := w.writeTableFile(table, schema, db, enumMap); err != nil {
return err
}
}
}
return nil
}
// writeEnumsFile writes all enums to a separate file
func (w *Writer) writeEnumsFile(schema *models.Schema) error {
templateData := NewTemplateData()
// Add enums
for _, enum := range schema.Enums {
enumData := NewEnumData(enum, w.typeMapper)
templateData.AddEnum(enumData)
}
// Add imports for enums
templateData.AddImport("import { pgEnum } from 'drizzle-orm/pg-core';")
// Generate code
code, err := w.templates.GenerateCode(templateData)
if err != nil {
return fmt.Errorf("failed to generate enums code: %w", err)
}
// Write to enums.ts file
filename := filepath.Join(w.options.OutputPath, "enums.ts")
return os.WriteFile(filename, []byte(code), 0644)
}
// writeTableFile writes a single table to its own file
func (w *Writer) writeTableFile(table *models.Table, schema *models.Schema, db *models.Database, enumMap map[string]bool) error {
templateData := NewTemplateData()
// Build table data
tableData := w.buildTableData(table, schema, db, enumMap)
templateData.AddTable(tableData)
// Add imports
w.addImports(templateData, db)
// If there are enums, add import from enums file
if len(schema.Enums) > 0 && w.tableUsesEnum(table, enumMap) {
// Import enum definitions from enums.ts
enumNames := w.getTableEnumNames(table, schema, enumMap)
if len(enumNames) > 0 {
importLine := fmt.Sprintf("import { %s } from './enums';", strings.Join(enumNames, ", "))
templateData.AddImport(importLine)
}
}
// Finalize imports
templateData.FinalizeImports()
// Generate code
code, err := w.templates.GenerateCode(templateData)
if err != nil {
return fmt.Errorf("failed to generate code for table %s: %w", table.Name, err)
}
// Generate filename: {tableName}.ts
filename := filepath.Join(w.options.OutputPath, table.Name+".ts")
return os.WriteFile(filename, []byte(code), 0644)
}
// buildTableData builds TableData from a models.Table
func (w *Writer) buildTableData(table *models.Table, schema *models.Schema, db *models.Database, enumMap map[string]bool) *TableData {
tableData := NewTableData(table, w.typeMapper)
// Add columns
for _, colName := range w.getSortedColumnNames(table) {
col := table.Columns[colName]
// Check if this column uses an enum
isEnum := enumMap[col.Type]
columnData := NewColumnData(col, table, w.typeMapper, isEnum)
// Set TypeScript type
drizzleType := w.typeMapper.SQLTypeToDrizzle(col.Type)
enumName := ""
if isEnum {
// For enums, use the enum type name
enumName = col.Type
}
baseType := w.typeMapper.DrizzleTypeToTypeScript(drizzleType, isEnum, enumName)
// Add null union if column is nullable
if !col.NotNull && !col.IsPrimaryKey {
columnData.TypeScriptType = baseType + " | null"
} else {
columnData.TypeScriptType = baseType
}
// Check if this column is a foreign key
if fk := w.getForeignKeyForColumn(col.Name, table); fk != nil {
columnData.IsForeignKey = true
refTableName := fk.ReferencedTable
refChain := w.typeMapper.BuildReferencesChain(fk, refTableName)
if refChain != "" {
columnData.ReferencesLine = "." + refChain
// Append to the drizzle chain
columnData.DrizzleChain += columnData.ReferencesLine
}
}
tableData.AddColumn(columnData)
}
// Collect all column field names that are used in indexes
indexColumnFields := make(map[string]bool)
// Add indexes (excluding single-column unique indexes, which are handled inline)
for _, index := range table.Indexes {
// Skip single-column unique indexes (handled by .unique() modifier)
if index.Unique && len(index.Columns) == 1 {
continue
}
// Track which columns are used in indexes
for _, colName := range index.Columns {
// Find the field name for this column
if col, exists := table.Columns[colName]; exists {
fieldName := w.typeMapper.ToCamelCase(col.Name)
indexColumnFields[fieldName] = true
}
}
indexData := NewIndexData(index, tableData.Name, w.typeMapper)
tableData.AddIndex(indexData)
}
// Add multi-column unique constraints as unique indexes
for _, constraint := range table.Constraints {
if constraint.Type == models.UniqueConstraint && len(constraint.Columns) > 1 {
// Create a unique index for this constraint
indexData := &IndexData{
Name: w.typeMapper.ToCamelCase(constraint.Name) + "Idx",
Columns: constraint.Columns,
IsUnique: true,
}
// Track which columns are used in indexes
for _, colName := range constraint.Columns {
if col, exists := table.Columns[colName]; exists {
fieldName := w.typeMapper.ToCamelCase(col.Name)
indexColumnFields[fieldName] = true
}
}
// Build column references as field names (for destructuring)
colRefs := make([]string, len(constraint.Columns))
for i, colName := range constraint.Columns {
if col, exists := table.Columns[colName]; exists {
colRefs[i] = w.typeMapper.ToCamelCase(col.Name)
} else {
colRefs[i] = w.typeMapper.ToCamelCase(colName)
}
}
indexData.Definition = "uniqueIndex('" + constraint.Name + "').on(" + joinStrings(colRefs, ", ") + ")"
tableData.AddIndex(indexData)
}
}
// Convert index column fields map to sorted slice
if len(indexColumnFields) > 0 {
fields := make([]string, 0, len(indexColumnFields))
for field := range indexColumnFields {
fields = append(fields, field)
}
// Sort for consistent output
sortStrings(fields)
tableData.IndexColumnFields = fields
}
return tableData
}
// sortStrings sorts a slice of strings in place
func sortStrings(strs []string) {
sort.Strings(strs)
}
// addImports adds the necessary imports to the template data
func (w *Writer) addImports(templateData *TemplateData, db *models.Database) {
// Determine which Drizzle imports we need
needsPgTable := len(templateData.Tables) > 0
needsPgEnum := len(templateData.Enums) > 0
needsIndex := false
needsUniqueIndex := false
needsSQL := false
// Check what we need based on tables
for _, table := range templateData.Tables {
for _, index := range table.Indexes {
if index.IsUnique {
needsUniqueIndex = true
} else {
needsIndex = true
}
}
// Check if any column uses SQL default values
for _, col := range table.Columns {
if strings.Contains(col.DrizzleChain, "sql`") {
needsSQL = true
}
}
}
// Build the import statement
imports := make([]string, 0)
if needsPgTable {
imports = append(imports, "pgTable")
}
if needsPgEnum {
imports = append(imports, "pgEnum")
}
// Add column types - for now, add common ones
// TODO: Could be optimized to only include used types
columnTypes := []string{
"integer", "bigint", "smallint",
"serial", "bigserial", "smallserial",
"text", "varchar", "char",
"boolean", "numeric", "real", "doublePrecision",
"timestamp", "date", "time", "interval",
"json", "jsonb", "uuid", "bytea",
}
imports = append(imports, columnTypes...)
if needsIndex {
imports = append(imports, "index")
}
if needsUniqueIndex {
imports = append(imports, "uniqueIndex")
}
importLine := "import { " + strings.Join(imports, ", ") + " } from 'drizzle-orm/pg-core';"
templateData.AddImport(importLine)
// Add SQL import if needed
if needsSQL {
templateData.AddImport("import { sql } from 'drizzle-orm';")
}
}
// buildEnumMap builds a map of enum type names for quick lookup
func (w *Writer) buildEnumMap(db *models.Database) map[string]bool {
enumMap := make(map[string]bool)
for _, schema := range db.Schemas {
for _, enum := range schema.Enums {
enumMap[enum.Name] = true
// Also add lowercase version for case-insensitive lookup
enumMap[strings.ToLower(enum.Name)] = true
}
}
return enumMap
}
// tableUsesEnum checks if a table uses any enum types
func (w *Writer) tableUsesEnum(table *models.Table, enumMap map[string]bool) bool {
for _, col := range table.Columns {
if enumMap[col.Type] || enumMap[strings.ToLower(col.Type)] {
return true
}
}
return false
}
// getTableEnumNames returns the list of enum variable names used by a table
func (w *Writer) getTableEnumNames(table *models.Table, schema *models.Schema, enumMap map[string]bool) []string {
enumNames := make([]string, 0)
seen := make(map[string]bool)
for _, col := range table.Columns {
if enumMap[col.Type] || enumMap[strings.ToLower(col.Type)] {
// Find the enum in schema
for _, enum := range schema.Enums {
if strings.EqualFold(enum.Name, col.Type) {
varName := w.typeMapper.ToCamelCase(enum.Name)
if !seen[varName] {
enumNames = append(enumNames, varName)
seen[varName] = true
}
break
}
}
}
}
return enumNames
}
// getSortedColumnNames returns column names sorted by sequence or name
func (w *Writer) getSortedColumnNames(table *models.Table) []string {
// Convert map to slice
columns := make([]*models.Column, 0, len(table.Columns))
for _, col := range table.Columns {
columns = append(columns, col)
}
// Sort by sequence, then by primary key, then by name
// (Similar to GORM writer)
sortColumns := func(i, j int) bool {
// Sort by sequence if both have it
if columns[i].Sequence > 0 && columns[j].Sequence > 0 {
return columns[i].Sequence < columns[j].Sequence
}
// Put primary keys first
if columns[i].IsPrimaryKey != columns[j].IsPrimaryKey {
return columns[i].IsPrimaryKey
}
// Otherwise sort alphabetically
return columns[i].Name < columns[j].Name
}
// Create a custom sorter
for i := 0; i < len(columns); i++ {
for j := i + 1; j < len(columns); j++ {
if !sortColumns(i, j) {
columns[i], columns[j] = columns[j], columns[i]
}
}
}
// Extract names
names := make([]string, len(columns))
for i, col := range columns {
names[i] = col.Name
}
return names
}
// getForeignKeyForColumn returns the foreign key constraint for a column, if any
func (w *Writer) getForeignKeyForColumn(columnName string, table *models.Table) *models.Constraint {
for _, constraint := range table.Constraints {
if constraint.Type == models.ForeignKeyConstraint {
for _, col := range constraint.Columns {
if col == columnName {
return constraint
}
}
}
}
return nil
}
// writeOutput writes the content to file or stdout
func (w *Writer) writeOutput(content string) error {
if w.options.OutputPath != "" {
return os.WriteFile(w.options.OutputPath, []byte(content), 0644)
}
// Print to stdout
fmt.Print(content)
return nil
}
// shouldUseMultiFile determines whether to use multi-file mode based on metadata or output path
func (w *Writer) shouldUseMultiFile() bool {
// Check if multi_file is explicitly set in metadata
if w.options.Metadata != nil {
if mf, ok := w.options.Metadata["multi_file"].(bool); ok {
return mf
}
}
// Auto-detect based on output path
if w.options.OutputPath == "" {
// No output path means stdout (single file)
return false
}
// Check if path ends with .ts (explicit file)
if strings.HasSuffix(w.options.OutputPath, ".ts") {
return false
}
// Check if path ends with directory separator
if strings.HasSuffix(w.options.OutputPath, "/") || strings.HasSuffix(w.options.OutputPath, "\\") {
return true
}
// Check if path exists and is a directory
info, err := os.Stat(w.options.OutputPath)
if err == nil && info.IsDir() {
return true
}
// Default to single file for ambiguous cases
return false
}
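// Example (sketch): metadata takes precedence over path detection, so
// multi-file mode can be forced even for a .ts output path. NewWriter and
// WriteDatabase are assumed here to follow the same pattern as the other
// writer packages:
//
// opts := &writers.WriterOptions{
// OutputPath: "models.ts", // would otherwise select single-file mode
// Metadata: map[string]interface{}{"multi_file": true},
// }
// err := NewWriter(opts).WriteDatabase(db)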

176
pkg/writers/gorm/README.md Normal file

@@ -0,0 +1,176 @@
# GORM Writer
Generates Go source files with GORM model definitions from database schema information.
## Overview
The GORM Writer converts RelSpec's internal database model representation into Go source code with GORM struct definitions, complete with proper tags, relationships, and methods.
## Features
- Generates GORM-compatible Go structs
- Creates proper `gorm` struct tags
- Generates `TableName()` methods
- Adds relationship fields (belongs-to, has-many)
- Supports both single-file and multi-file output
- Auto-generates helper methods (optional)
- Maps SQL types to Go types
- Handles nullable fields with custom sql_types
## Usage
### Basic Example
```go
package main
import (
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
"git.warky.dev/wdevs/relspecgo/pkg/writers/gorm"
)
func main() {
// Assume db is a *models.Database from a reader
options := &writers.WriterOptions{
OutputPath: "models.go",
PackageName: "models",
}
writer := gorm.NewWriter(options)
err := writer.WriteDatabase(db)
if err != nil {
panic(err)
}
}
```
### CLI Examples
```bash
# Generate GORM models from PostgreSQL database (single file)
relspec --input pgsql \
--conn "postgres://localhost/mydb" \
--output gorm \
--out-file models.go \
--package models
# Generate GORM models with multi-file output (one file per table)
relspec --input json \
--in-file schema.json \
--output gorm \
--out-file models/ \
--package models
# Convert DBML to GORM models
relspec --input dbml --in-file schema.dbml --output gorm --out-file models.go
```
## Output Modes
### Single File Mode
Generates all models in one file:
```bash
relspec --input pgsql --conn "..." --output gorm --out-file models.go
```
### Multi-File Mode
Generates one file per table (auto-detected when output is a directory):
```bash
relspec --input pgsql --conn "..." --output gorm --out-file models/
```
Files are named: `sql_{schema}_{table}.go`
## Generated Code Example
```go
package models
import (
"time"
sql_types "git.warky.dev/wdevs/sql_types"
)
type ModelUser struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement" json:"id"`
Username string `gorm:"column:username;type:varchar(50);not null;uniqueIndex" json:"username"`
Email string `gorm:"column:email;type:varchar(100);not null" json:"email"`
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;not null;default:now()" json:"created_at"`
// Relationships
Pos []*ModelPost `gorm:"foreignKey:UserID;references:ID;constraint:OnDelete:CASCADE" json:"pos,omitempty"`
}
func (ModelUser) TableName() string {
return "public.users"
}
type ModelPost struct {
ID int64 `gorm:"column:id;type:bigint;primaryKey" json:"id"`
UserID int64 `gorm:"column:user_id;type:bigint;not null" json:"user_id"`
Title string `gorm:"column:title;type:varchar(200);not null" json:"title"`
Content sql_types.SqlString `gorm:"column:content;type:text" json:"content,omitempty"`
// Belongs to
Use *ModelUser `gorm:"foreignKey:UserID;references:ID" json:"use,omitempty"`
}
func (ModelPost) TableName() string {
return "public.posts"
}
```
## Writer Options
### Metadata Options
Configure the writer behavior using metadata in `WriterOptions`:
```go
options := &writers.WriterOptions{
OutputPath: "models.go",
PackageName: "models",
Metadata: map[string]interface{}{
"multi_file": true, // Enable multi-file mode
"populate_refs": true, // Populate RefDatabase/RefSchema
"generate_get_id_str": true, // Generate GetIDStr() methods
},
}
```
## Type Mapping
| SQL Type | Go Type | Notes |
|----------|---------|-------|
| bigint, int8 | int64 | - |
| integer, int, int4 | int | - |
| smallint, int2 | int16 | - |
| varchar, text | string | Not nullable |
| varchar, text (nullable) | sql_types.SqlString | Nullable |
| boolean, bool | bool | - |
| timestamp, timestamptz | time.Time | - |
| numeric, decimal | float64 | - |
| uuid | string | - |
| json, jsonb | string | - |
## Relationship Generation
The writer automatically generates relationship fields:
- **Belongs-to**: Generated for tables with foreign keys
- **Has-many**: Generated for tables referenced by foreign keys
- Relationship field names are 3-letter abbreviations of the related model name (e.g., `Use` for `User`, `Pos` for `Post`)
- Includes proper `gorm` tags with `foreignKey` and `references`
## Notes
- Model names are prefixed with "Model" (e.g., `ModelUser`)
- Nullable columns use `sql_types.SqlString`, `sql_types.SqlInt64`, etc.
- Generated code is auto-formatted with `go fmt`
- JSON tags are automatically added
- Supports schema-qualified table names in `TableName()` method


@@ -0,0 +1,272 @@
# GraphQL Schema Writer
The GraphQL writer converts RelSpec's internal database model into GraphQL Schema Definition Language (SDL) files.
## Features
- **Table to Type mapping**: Database tables become GraphQL types
- **Column to Field mapping**: Table columns become type fields
- **Enum support**: Database enums are preserved
- **Custom scalar declarations**: Automatically declares DateTime, JSON, Date scalars
- **Implicit relationships**: Generates relationship fields from foreign keys
- **Many-to-many support**: Handles junction tables intelligently
- **Clean output**: Proper formatting, field ordering, and comments
## Type Mappings
### SQL to GraphQL
| SQL Type | GraphQL Type | Notes |
|----------|--------------|-------|
| bigint, integer, serial (PK) | ID | Primary keys map to ID |
| bigint, integer, int | Int | |
| text, varchar, char | String | |
| uuid (PK) | ID | UUID primary keys also map to ID |
| uuid | String | Non-PK UUIDs map to String |
| double precision, numeric, float | Float | |
| boolean | Boolean | |
| timestamp, timestamptz | DateTime | Custom scalar |
| jsonb, json | JSON | Custom scalar |
| date | Date | Custom scalar |
| Enum types | Enum | Preserves enum name |
| Arrays (e.g., text[]) | [Type] | Mapped to GraphQL lists |
## Relationship Handling
The writer intelligently generates relationship fields based on foreign key constraints:
### Forward Relationships (FK on this table)
```sql
-- Post table has authorId FK to User.id
CREATE TABLE post (
id bigint PRIMARY KEY,
title text NOT NULL,
author_id bigint NOT NULL REFERENCES user(id)
);
```
```graphql
type Post {
id: ID!
title: String!
author: User! # Generated from authorId FK
}
```
### Reverse Relationships (FK on other table)
```graphql
type User {
id: ID!
email: String!
posts: [Post!]! # Reverse relationship (Post has FK to User)
}
```
### Many-to-Many Relationships
Junction tables (tables with only PKs and FKs) are automatically detected and hidden:
```sql
CREATE TABLE post_tag (
post_id bigint NOT NULL REFERENCES post(id),
tag_id bigint NOT NULL REFERENCES tag(id),
PRIMARY KEY (post_id, tag_id)
);
```
```graphql
type Post {
id: ID!
tags: [Tag!]! # Many-to-many through PostTag junction table
}
type Tag {
id: ID!
posts: [Post!]! # Reverse many-to-many
}
# Note: PostTag junction table is NOT included in output
```
## Usage
### Basic Usage
```go
import (
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
"git.warky.dev/wdevs/relspecgo/pkg/writers/graphql"
)
opts := &writers.WriterOptions{
OutputPath: "schema.graphql",
}
writer := graphql.NewWriter(opts)
err := writer.WriteDatabase(db)
```
### With Metadata Options
```go
opts := &writers.WriterOptions{
OutputPath: "schema.graphql",
Metadata: map[string]any{
"includeScalarDeclarations": true, // Include scalar declarations
"includeComments": true, // Include field/table comments
},
}
writer := graphql.NewWriter(opts)
err := writer.WriteDatabase(db)
```
### Write to Stdout
```go
opts := &writers.WriterOptions{
OutputPath: "", // Empty path writes to stdout
}
writer := graphql.NewWriter(opts)
err := writer.WriteDatabase(db)
```
## CLI Usage
```bash
# Convert PostgreSQL database to GraphQL
relspec --input pgsql \
--conn "postgres://user:pass@localhost:5432/mydb" \
--output graphql --out-file schema.graphql
# Convert GORM models to GraphQL
relspec --input gorm --in-file ./models \
--output graphql --out-file schema.graphql
# Convert JSON to GraphQL
relspec --input json --in-file schema.json \
--output graphql --out-file schema.graphql
```
## Output Format
The generated GraphQL schema follows this structure:
1. **Header comment** (if enabled)
2. **Custom scalar declarations** (if any custom scalars are used)
3. **Enum definitions** (alphabetically sorted)
4. **Type definitions** (with fields ordered: ID first, then scalars alphabetically, then relationships)
### Example Output
```graphql
# Generated GraphQL Schema
# Database: myapp
scalar DateTime
scalar JSON
scalar Date
enum Role {
ADMIN
USER
MODERATOR
}
type User {
id: ID!
createdAt: DateTime!
email: String!
name: String!
role: Role!
posts: [Post!]!
profile: Profile
}
type Post {
id: ID!
content: String
published: Boolean!
publishedAt: Date
title: String!
author: User!
tags: [Tag!]!
}
type Tag {
id: ID!
name: String!
posts: [Post!]!
}
```
## Metadata Options
| Option | Type | Description | Default |
|--------|------|-------------|---------|
| `includeScalarDeclarations` | bool | Include `scalar DateTime`, etc. declarations | true |
| `includeComments` | bool | Include table/field descriptions as comments | true |
| `preservePKType` | bool | Use Int/String for PKs instead of ID | false |
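For instance, to keep primary keys as their underlying scalar types instead of `ID` (a sketch using the option above; `db` is a `*models.Database` from a reader):
```go
opts := &writers.WriterOptions{
    OutputPath: "schema.graphql",
    Metadata: map[string]any{
        "preservePKType": true, // bigint PKs become Int, uuid PKs become String
    },
}
writer := graphql.NewWriter(opts)
err := writer.WriteDatabase(db)
```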
## Field Naming Conventions
- **FK columns**: Foreign key columns like `authorId` are removed from the output; instead, a relationship field `author` is generated
- **Relationship pluralization**: Reverse one-to-many relationships are pluralized (e.g., `posts`, `tags`)
- **Casing**: Scalar field names keep the column's original casing from the database; generated relationship fields are camelCased
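The FK-to-field transformation is small enough to sketch; this is a distilled, self-contained version of the rule described above, not the writer's exact code:
```go
package main

import (
	"fmt"
	"strings"
)

// relationField strips a trailing "Id" or "_id" from a FK column name,
// then lower-cases the first character.
func relationField(fkColumn string) string {
	name := strings.TrimSuffix(fkColumn, "Id")
	name = strings.TrimSuffix(name, "_id")
	if name == "" {
		name = fkColumn
	}
	return strings.ToLower(name[:1]) + name[1:]
}

func main() {
	fmt.Println(relationField("authorId")) // author
	fmt.Println(relationField("owner_id")) // owner
}
```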
## Junction Table Detection
A table is considered a junction table if it:
1. Has exactly 2 foreign key constraints
2. All columns are either primary keys or foreign keys
3. Has a composite primary key on the FK columns
Junction tables are automatically hidden from the GraphQL output, and many-to-many relationship fields are generated on the related types instead.
## Limitations
- All tables in all schemas are flattened into a single GraphQL schema
- No support for GraphQL-specific features like directives, interfaces, or unions
- Nullable vs non-nullable is determined solely by the `NOT NULL` constraint
## Example Conversion
**Input** (Database Schema):
```sql
CREATE TABLE user (
id bigint PRIMARY KEY,
email text NOT NULL,
created_at timestamp NOT NULL
);
CREATE TABLE post (
id bigint PRIMARY KEY,
title text NOT NULL,
author_id bigint NOT NULL REFERENCES user(id)
);
```
**Output** (GraphQL Schema):
```graphql
scalar DateTime
type User {
id: ID!
createdAt: DateTime!
email: String!
posts: [Post!]!
}
type Post {
id: ID!
title: String!
author: User!
}
```


@@ -0,0 +1,178 @@
package graphql
import (
"fmt"
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
)
func (w *Writer) generateRelationFields(table *models.Table, db *models.Database, schema *models.Schema) []string {
var fields []string
// 1. Forward relationships (this table has FK)
for _, constraint := range table.Constraints {
if constraint.Type != models.ForeignKeyConstraint {
continue
}
// Find the related table
relatedTable := w.findTable(db, constraint.ReferencedSchema, constraint.ReferencedTable)
if relatedTable == nil {
continue
}
if len(constraint.Columns) == 0 {
continue
}
// Generate field name (remove "Id" suffix from FK column if present)
fieldName := w.relationFieldName(constraint.Columns[0])
// Determine nullability from FK column
nullable := true
for _, colName := range constraint.Columns {
if col, exists := table.Columns[colName]; exists {
if col.NotNull {
nullable = false
break
}
}
}
// Format: fieldName: RelatedType! or fieldName: RelatedType
gqlType := relatedTable.Name
if !nullable {
gqlType += "!"
}
fields = append(fields, fmt.Sprintf(" %s: %s", fieldName, gqlType))
}
// 2. Reverse relationships (other tables reference this table)
for _, otherSchema := range db.Schemas {
for _, otherTable := range otherSchema.Tables {
if otherTable.Name == table.Name && otherSchema.Name == schema.Name {
continue
}
// Skip join tables for many-to-many
if w.isJoinTable(otherTable) {
// Check if this is a many-to-many through this join table
if m2mField := w.getManyToManyField(table, otherTable, db); m2mField != "" {
fields = append(fields, m2mField)
}
continue
}
for _, constraint := range otherTable.Constraints {
if constraint.Type == models.ForeignKeyConstraint &&
constraint.ReferencedTable == table.Name &&
constraint.ReferencedSchema == schema.Name {
// Add reverse relationship field (array)
fieldName := w.pluralize(w.camelCase(otherTable.Name))
fields = append(fields, fmt.Sprintf(" %s: [%s!]!", fieldName, otherTable.Name))
}
}
}
}
return fields
}
func (w *Writer) getManyToManyField(table *models.Table, joinTable *models.Table, db *models.Database) string {
// Find the two FK constraints in the join table
var fk1, fk2 *models.Constraint
for _, constraint := range joinTable.Constraints {
if constraint.Type == models.ForeignKeyConstraint {
if fk1 == nil {
fk1 = constraint
} else {
fk2 = constraint
}
}
}
if fk1 == nil || fk2 == nil {
return ""
}
// Determine which FK points to our table and which to the other table
var targetConstraint *models.Constraint
if fk1.ReferencedTable == table.Name {
targetConstraint = fk2
} else if fk2.ReferencedTable == table.Name {
targetConstraint = fk1
} else {
return "" // This join table doesn't involve our table
}
// Find the target table
targetTable := w.findTable(db, targetConstraint.ReferencedSchema, targetConstraint.ReferencedTable)
if targetTable == nil {
return ""
}
// Generate many-to-many field
fieldName := w.pluralize(w.camelCase(targetTable.Name))
return fmt.Sprintf(" %s: [%s!]!", fieldName, targetTable.Name)
}
func (w *Writer) findTable(db *models.Database, schemaName, tableName string) *models.Table {
for _, schema := range db.Schemas {
if schema.Name != schemaName {
continue
}
for _, table := range schema.Tables {
if table.Name == tableName {
return table
}
}
}
return nil
}
func (w *Writer) relationFieldName(fkColumnName string) string {
// Remove "Id" or "_id" suffix
name := fkColumnName
if strings.HasSuffix(name, "Id") {
name = name[:len(name)-2]
} else if strings.HasSuffix(name, "_id") {
name = name[:len(name)-3]
}
return w.camelCase(name)
}
func (w *Writer) camelCase(s string) string {
// If already camelCase or PascalCase, convert to camelCase
if s == "" {
return s
}
// Convert first character to lowercase
return strings.ToLower(string(s[0])) + s[1:]
}
func (w *Writer) pluralize(s string) string {
// Simple pluralization rules
if s == "" {
return s
}
// Words already ending in 's' are treated as plural and returned as-is,
// so the 'es' rule below only needs to cover 'x', 'z', 'ch', 'sh'
if strings.HasSuffix(s, "s") {
return s
}
// Words ending in 'y' → 'ies'
if strings.HasSuffix(s, "y") {
return s[:len(s)-1] + "ies"
}
// Words ending in 'x', 'z', 'ch', 'sh' → add 'es'
if strings.HasSuffix(s, "x") || strings.HasSuffix(s, "z") ||
strings.HasSuffix(s, "ch") || strings.HasSuffix(s, "sh") {
return s + "es"
}
// Default: add 's'
return s + "s"
}


@@ -0,0 +1,148 @@
package graphql
import (
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
)
func (w *Writer) sqlTypeToGraphQL(sqlType string, column *models.Column, table *models.Table, schema *models.Schema) string {
// Check if this is a primary key → ID type
if column.IsPrimaryKey {
// Check metadata for explicit type preference
if w.options.Metadata != nil {
if preserveType, ok := w.options.Metadata["preservePKType"].(bool); ok && preserveType {
// Use Int or String based on SQL type
if w.isIntegerType(sqlType) {
return "Int"
}
return "String"
}
}
return "ID"
}
// Map SQL types to custom scalars
if scalar := w.sqlTypeToCustomScalar(sqlType); scalar != "" {
return scalar
}
// Check if it's an enum
if w.isEnumType(sqlType, schema) {
return sqlType
}
// Standard type mappings
baseType := strings.Split(sqlType, "(")[0] // Remove length/precision
baseType = strings.TrimSpace(baseType)
// Handle array types
if strings.HasSuffix(baseType, "[]") {
elemType := strings.TrimSuffix(baseType, "[]")
gqlType := w.mapBaseTypeToGraphQL(elemType)
return "[" + gqlType + "]"
}
return w.mapBaseTypeToGraphQL(baseType)
}
func (w *Writer) mapBaseTypeToGraphQL(baseType string) string {
typeMap := map[string]string{
// Text types
"text": "String",
"varchar": "String",
"char": "String",
"character": "String",
"bpchar": "String",
"name": "String",
// UUID (primary-key UUIDs already returned ID above, so this covers non-PK columns)
"uuid": "String",
// Integer types
"integer": "Int",
"int": "Int",
"int2": "Int",
"int4": "Int",
"int8": "Int",
"bigint": "Int",
"smallint": "Int",
"serial": "Int",
"bigserial": "Int",
"smallserial": "Int",
// Float types
"double precision": "Float",
"float": "Float",
"float4": "Float",
"float8": "Float",
"real": "Float",
"numeric": "Float",
"decimal": "Float",
"money": "Float",
// Boolean
"boolean": "Boolean",
"bool": "Boolean",
}
if gqlType, ok := typeMap[baseType]; ok {
return gqlType
}
// Default: capitalize first letter
if len(baseType) > 0 {
return strings.ToUpper(string(baseType[0])) + baseType[1:]
}
return "String"
}
func (w *Writer) sqlTypeToCustomScalar(sqlType string) string {
scalarMap := map[string]string{
"timestamp": "DateTime",
"timestamptz": "DateTime",
"timestamp with time zone": "DateTime",
"jsonb": "JSON",
"json": "JSON",
"date": "Date",
}
baseType := strings.Split(sqlType, "(")[0]
baseType = strings.TrimSpace(baseType)
if scalar, ok := scalarMap[baseType]; ok {
return scalar
}
return ""
}
func (w *Writer) isIntegerType(sqlType string) bool {
intTypes := map[string]bool{
"integer": true,
"int": true,
"int2": true,
"int4": true,
"int8": true,
"bigint": true,
"smallint": true,
"serial": true,
"bigserial": true,
"smallserial": true,
}
baseType := strings.Split(sqlType, "(")[0]
baseType = strings.TrimSpace(baseType)
return intTypes[baseType]
}
func (w *Writer) isEnumType(sqlType string, schema *models.Schema) bool {
for _, enum := range schema.Enums {
if enum.Name == sqlType {
return true
}
}
return false
}


@@ -0,0 +1,272 @@
package graphql
import (
"fmt"
"os"
"sort"
"strings"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
)
type Writer struct {
options *writers.WriterOptions
}
func NewWriter(options *writers.WriterOptions) *Writer {
return &Writer{
options: options,
}
}
func (w *Writer) WriteDatabase(db *models.Database) error {
content := w.databaseToGraphQL(db)
if w.options.OutputPath != "" {
return os.WriteFile(w.options.OutputPath, []byte(content), 0644)
}
fmt.Print(content)
return nil
}
func (w *Writer) WriteSchema(schema *models.Schema) error {
db := models.InitDatabase(schema.Name)
db.Schemas = []*models.Schema{schema}
return w.WriteDatabase(db)
}
func (w *Writer) WriteTable(table *models.Table) error {
schema := models.InitSchema(table.Schema)
schema.Tables = []*models.Table{table}
db := models.InitDatabase(schema.Name)
db.Schemas = []*models.Schema{schema}
return w.WriteDatabase(db)
}
func (w *Writer) databaseToGraphQL(db *models.Database) string {
var sb strings.Builder
// Header comment
if w.shouldIncludeComments() {
sb.WriteString("# Generated GraphQL Schema\n")
if db.Name != "" {
sb.WriteString(fmt.Sprintf("# Database: %s\n", db.Name))
}
sb.WriteString("\n")
}
// Custom scalar declarations
if w.shouldIncludeScalarDeclarations() {
scalars := w.collectCustomScalars(db)
if len(scalars) > 0 {
for _, scalar := range scalars {
sb.WriteString(fmt.Sprintf("scalar %s\n", scalar))
}
sb.WriteString("\n")
}
}
// Enum definitions
for _, schema := range db.Schemas {
for _, enum := range schema.Enums {
sb.WriteString(w.enumToGraphQL(enum))
sb.WriteString("\n")
}
}
// Type definitions
for _, schema := range db.Schemas {
for _, table := range schema.Tables {
// Skip join tables (tables with only PK+FK columns)
if w.isJoinTable(table) {
continue
}
sb.WriteString(w.tableToGraphQL(table, db, schema))
sb.WriteString("\n")
}
}
return sb.String()
}
func (w *Writer) shouldIncludeComments() bool {
if w.options.Metadata != nil {
if include, ok := w.options.Metadata["includeComments"].(bool); ok {
return include
}
}
return true // Default to true
}
func (w *Writer) shouldIncludeScalarDeclarations() bool {
if w.options.Metadata != nil {
if include, ok := w.options.Metadata["includeScalarDeclarations"].(bool); ok {
return include
}
}
return true // Default to true
}
func (w *Writer) collectCustomScalars(db *models.Database) []string {
scalarsNeeded := make(map[string]bool)
for _, schema := range db.Schemas {
for _, table := range schema.Tables {
for _, col := range table.Columns {
if scalar := w.sqlTypeToCustomScalar(col.Type); scalar != "" {
scalarsNeeded[scalar] = true
}
}
}
}
// Convert to sorted slice
scalars := make([]string, 0, len(scalarsNeeded))
for scalar := range scalarsNeeded {
scalars = append(scalars, scalar)
}
sort.Strings(scalars)
return scalars
}
func (w *Writer) isJoinTable(table *models.Table) bool {
// A join table typically has:
// 1. Exactly 2 FK constraints
// 2. Composite primary key on those FK columns
// 3. No other columns
fkCount := 0
for _, constraint := range table.Constraints {
if constraint.Type == models.ForeignKeyConstraint {
fkCount++
}
}
if fkCount != 2 {
return false
}
// Check if all columns are either PKs or FKs
for _, col := range table.Columns {
isFKColumn := false
for _, constraint := range table.Constraints {
if constraint.Type == models.ForeignKeyConstraint {
for _, fkCol := range constraint.Columns {
if fkCol == col.Name {
isFKColumn = true
break
}
}
}
}
if !isFKColumn && !col.IsPrimaryKey {
// Found a column that's neither PK nor FK
return false
}
}
return true
}
func (w *Writer) enumToGraphQL(enum *models.Enum) string {
var sb strings.Builder
sb.WriteString(fmt.Sprintf("enum %s {\n", enum.Name))
for _, value := range enum.Values {
sb.WriteString(fmt.Sprintf(" %s\n", value))
}
sb.WriteString("}\n")
return sb.String()
}
func (w *Writer) tableToGraphQL(table *models.Table, db *models.Database, schema *models.Schema) string {
var sb strings.Builder
// Type name
typeName := table.Name
// Description comment
if w.shouldIncludeComments() && (table.Description != "" || table.Comment != "") {
desc := table.Description
if desc == "" {
desc = table.Comment
}
sb.WriteString(fmt.Sprintf("# %s\n", desc))
}
sb.WriteString(fmt.Sprintf("type %s {\n", typeName))
// Collect and categorize fields
var idFields, scalarFields, relationFields []string
for _, column := range table.Columns {
// Skip FK columns (they become relation fields)
if w.isForeignKeyColumn(column, table) {
continue
}
gqlType := w.sqlTypeToGraphQL(column.Type, column, table, schema)
if gqlType == "" {
continue // Skip if type couldn't be mapped
}
// Determine nullability
if column.NotNull {
gqlType += "!"
}
field := fmt.Sprintf(" %s: %s", column.Name, gqlType)
if column.IsPrimaryKey {
idFields = append(idFields, field)
} else {
scalarFields = append(scalarFields, field)
}
}
// Add relation fields
relationFields = w.generateRelationFields(table, db, schema)
// Write fields in order: ID, scalars (sorted), relations (sorted)
for _, field := range idFields {
sb.WriteString(field + "\n")
}
sort.Strings(scalarFields)
for _, field := range scalarFields {
sb.WriteString(field + "\n")
}
if len(relationFields) > 0 {
if len(scalarFields) > 0 || len(idFields) > 0 {
sb.WriteString("\n") // Blank line before relations
}
sort.Strings(relationFields)
for _, field := range relationFields {
sb.WriteString(field + "\n")
}
}
sb.WriteString("}\n")
return sb.String()
}
func (w *Writer) isForeignKeyColumn(column *models.Column, table *models.Table) bool {
for _, constraint := range table.Constraints {
if constraint.Type == models.ForeignKeyConstraint {
for _, fkCol := range constraint.Columns {
if fkCol == column.Name {
return true
}
}
}
}
return false
}


@@ -0,0 +1,412 @@
package graphql
import (
"strings"
"testing"
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
)
func TestWriter_WriteTable_Simple(t *testing.T) {
table := models.InitTable("User", "public")
idCol := models.InitColumn("id", "User", "public")
idCol.Type = "bigint"
idCol.IsPrimaryKey = true
idCol.NotNull = true
table.Columns["id"] = idCol
nameCol := models.InitColumn("name", "User", "public")
nameCol.Type = "text"
nameCol.NotNull = true
table.Columns["name"] = nameCol
emailCol := models.InitColumn("email", "User", "public")
emailCol.Type = "text"
emailCol.NotNull = false
table.Columns["email"] = emailCol
opts := &writers.WriterOptions{
OutputPath: "",
}
writer := NewWriter(opts)
schema := models.InitSchema("public")
schema.Tables = []*models.Table{table}
db := models.InitDatabase("test")
db.Schemas = []*models.Schema{schema}
output := writer.databaseToGraphQL(db)
// Verify output contains type definition
if !strings.Contains(output, "type User {") {
t.Error("Expected 'type User {' in output")
}
// Verify fields
if !strings.Contains(output, "id: ID!") {
t.Error("Expected 'id: ID!' in output")
}
if !strings.Contains(output, "name: String!") {
t.Error("Expected 'name: String!' in output")
}
if !strings.Contains(output, "email: String") {
t.Error("Expected 'email: String' in output")
}
// Ensure email is not followed by ! (nullable)
if strings.Contains(output, "email: String!") {
t.Error("Did not expect 'email: String!' (should be nullable)")
}
}
func TestWriter_WriteDatabase_WithEnum(t *testing.T) {
schema := models.InitSchema("public")
// Create enum
roleEnum := &models.Enum{
Name: "Role",
Schema: "public",
Values: []string{"ADMIN", "USER", "GUEST"},
}
schema.Enums = []*models.Enum{roleEnum}
// Create table with enum field
table := models.InitTable("User", "public")
idCol := models.InitColumn("id", "User", "public")
idCol.Type = "bigint"
idCol.IsPrimaryKey = true
idCol.NotNull = true
table.Columns["id"] = idCol
roleCol := models.InitColumn("role", "User", "public")
roleCol.Type = "Role"
roleCol.NotNull = true
table.Columns["role"] = roleCol
schema.Tables = []*models.Table{table}
db := models.InitDatabase("test")
db.Schemas = []*models.Schema{schema}
opts := &writers.WriterOptions{}
writer := NewWriter(opts)
output := writer.databaseToGraphQL(db)
// Verify enum definition
if !strings.Contains(output, "enum Role {") {
t.Error("Expected 'enum Role {' in output")
}
if !strings.Contains(output, "ADMIN") {
t.Error("Expected 'ADMIN' enum value in output")
}
// Verify enum usage in type
if !strings.Contains(output, "role: Role!") {
t.Error("Expected 'role: Role!' in output")
}
}
func TestWriter_WriteDatabase_WithRelations(t *testing.T) {
schema := models.InitSchema("public")
// Create User table
userTable := models.InitTable("User", "public")
userIdCol := models.InitColumn("id", "User", "public")
userIdCol.Type = "bigint"
userIdCol.IsPrimaryKey = true
userIdCol.NotNull = true
userTable.Columns["id"] = userIdCol
userNameCol := models.InitColumn("name", "User", "public")
userNameCol.Type = "text"
userNameCol.NotNull = true
userTable.Columns["name"] = userNameCol
// Create Post table with FK to User
postTable := models.InitTable("Post", "public")
postIdCol := models.InitColumn("id", "Post", "public")
postIdCol.Type = "bigint"
postIdCol.IsPrimaryKey = true
postIdCol.NotNull = true
postTable.Columns["id"] = postIdCol
titleCol := models.InitColumn("title", "Post", "public")
titleCol.Type = "text"
titleCol.NotNull = true
postTable.Columns["title"] = titleCol
authorIdCol := models.InitColumn("authorId", "Post", "public")
authorIdCol.Type = "bigint"
authorIdCol.NotNull = true
postTable.Columns["authorId"] = authorIdCol
// Add FK constraint
fkConstraint := models.InitConstraint("fk_post_author", models.ForeignKeyConstraint)
fkConstraint.Schema = "public"
fkConstraint.Table = "Post"
fkConstraint.Columns = []string{"authorId"}
fkConstraint.ReferencedSchema = "public"
fkConstraint.ReferencedTable = "User"
fkConstraint.ReferencedColumns = []string{"id"}
postTable.Constraints["fk_post_author"] = fkConstraint
schema.Tables = []*models.Table{userTable, postTable}
db := models.InitDatabase("test")
db.Schemas = []*models.Schema{schema}
opts := &writers.WriterOptions{}
writer := NewWriter(opts)
output := writer.databaseToGraphQL(db)
// Verify Post has author field (forward relationship)
if !strings.Contains(output, "author: User!") {
t.Error("Expected 'author: User!' in Post type")
}
// Verify authorId FK column is NOT in the output
if strings.Contains(output, "authorId:") {
t.Error("Did not expect 'authorId:' field in output (FK columns should be hidden)")
}
// Verify User has posts field (reverse relationship)
if !strings.Contains(output, "posts: [Post!]!") {
t.Error("Expected 'posts: [Post!]!' in User type")
}
}
func TestWriter_WriteDatabase_CustomScalars(t *testing.T) {
schema := models.InitSchema("public")
table := models.InitTable("Event", "public")
idCol := models.InitColumn("id", "Event", "public")
idCol.Type = "bigint"
idCol.IsPrimaryKey = true
idCol.NotNull = true
table.Columns["id"] = idCol
createdAtCol := models.InitColumn("createdAt", "Event", "public")
createdAtCol.Type = "timestamp"
createdAtCol.NotNull = true
table.Columns["createdAt"] = createdAtCol
metadataCol := models.InitColumn("metadata", "Event", "public")
metadataCol.Type = "jsonb"
metadataCol.NotNull = false
table.Columns["metadata"] = metadataCol
dateCol := models.InitColumn("eventDate", "Event", "public")
dateCol.Type = "date"
dateCol.NotNull = false
table.Columns["eventDate"] = dateCol
schema.Tables = []*models.Table{table}
db := models.InitDatabase("test")
db.Schemas = []*models.Schema{schema}
opts := &writers.WriterOptions{}
writer := NewWriter(opts)
output := writer.databaseToGraphQL(db)
// Verify scalar declarations
if !strings.Contains(output, "scalar DateTime") {
t.Error("Expected 'scalar DateTime' declaration")
}
if !strings.Contains(output, "scalar JSON") {
t.Error("Expected 'scalar JSON' declaration")
}
if !strings.Contains(output, "scalar Date") {
t.Error("Expected 'scalar Date' declaration")
}
// Verify field types
if !strings.Contains(output, "createdAt: DateTime!") {
t.Error("Expected 'createdAt: DateTime!' in output")
}
if !strings.Contains(output, "metadata: JSON") {
t.Error("Expected 'metadata: JSON' in output")
}
if !strings.Contains(output, "eventDate: Date") {
t.Error("Expected 'eventDate: Date' in output")
}
}
func TestWriter_WriteDatabase_ManyToMany(t *testing.T) {
schema := models.InitSchema("public")
// Create Post table
postTable := models.InitTable("Post", "public")
postIdCol := models.InitColumn("id", "Post", "public")
postIdCol.Type = "bigint"
postIdCol.IsPrimaryKey = true
postIdCol.NotNull = true
postTable.Columns["id"] = postIdCol
titleCol := models.InitColumn("title", "Post", "public")
titleCol.Type = "text"
titleCol.NotNull = true
postTable.Columns["title"] = titleCol
// Create Tag table
tagTable := models.InitTable("Tag", "public")
tagIdCol := models.InitColumn("id", "Tag", "public")
tagIdCol.Type = "bigint"
tagIdCol.IsPrimaryKey = true
tagIdCol.NotNull = true
tagTable.Columns["id"] = tagIdCol
nameCol := models.InitColumn("name", "Tag", "public")
nameCol.Type = "text"
nameCol.NotNull = true
tagTable.Columns["name"] = nameCol
// Create PostTag join table
joinTable := models.InitTable("PostTag", "public")
postIdJoinCol := models.InitColumn("postId", "PostTag", "public")
postIdJoinCol.Type = "bigint"
postIdJoinCol.NotNull = true
postIdJoinCol.IsPrimaryKey = true
joinTable.Columns["postId"] = postIdJoinCol
tagIdJoinCol := models.InitColumn("tagId", "PostTag", "public")
tagIdJoinCol.Type = "bigint"
tagIdJoinCol.NotNull = true
tagIdJoinCol.IsPrimaryKey = true
joinTable.Columns["tagId"] = tagIdJoinCol
// Add composite PK constraint
pkConstraint := models.InitConstraint("pk_posttag", models.PrimaryKeyConstraint)
pkConstraint.Schema = "public"
pkConstraint.Table = "PostTag"
pkConstraint.Columns = []string{"postId", "tagId"}
joinTable.Constraints["pk_posttag"] = pkConstraint
// Add FK to Post
fk1 := models.InitConstraint("fk_posttag_post", models.ForeignKeyConstraint)
fk1.Schema = "public"
fk1.Table = "PostTag"
fk1.Columns = []string{"postId"}
fk1.ReferencedSchema = "public"
fk1.ReferencedTable = "Post"
fk1.ReferencedColumns = []string{"id"}
joinTable.Constraints["fk_posttag_post"] = fk1
// Add FK to Tag
fk2 := models.InitConstraint("fk_posttag_tag", models.ForeignKeyConstraint)
fk2.Schema = "public"
fk2.Table = "PostTag"
fk2.Columns = []string{"tagId"}
fk2.ReferencedSchema = "public"
fk2.ReferencedTable = "Tag"
fk2.ReferencedColumns = []string{"id"}
joinTable.Constraints["fk_posttag_tag"] = fk2
schema.Tables = []*models.Table{postTable, tagTable, joinTable}
db := models.InitDatabase("test")
db.Schemas = []*models.Schema{schema}
opts := &writers.WriterOptions{}
writer := NewWriter(opts)
output := writer.databaseToGraphQL(db)
// Verify join table is NOT in output
if strings.Contains(output, "type PostTag") {
t.Error("Did not expect 'type PostTag' (join tables should be hidden)")
}
// Verify Post has tags field
if !strings.Contains(output, "tags: [Tag!]!") {
t.Error("Expected 'tags: [Tag!]!' in Post type")
}
// Verify Tag has posts field
if !strings.Contains(output, "posts: [Post!]!") {
t.Error("Expected 'posts: [Post!]!' in Tag type")
}
}
func TestWriter_WriteDatabase_UUIDType(t *testing.T) {
schema := models.InitSchema("public")
table := models.InitTable("User", "public")
idCol := models.InitColumn("id", "User", "public")
idCol.Type = "uuid"
idCol.IsPrimaryKey = true
idCol.NotNull = true
table.Columns["id"] = idCol
schema.Tables = []*models.Table{table}
db := models.InitDatabase("test")
db.Schemas = []*models.Schema{schema}
opts := &writers.WriterOptions{}
writer := NewWriter(opts)
output := writer.databaseToGraphQL(db)
// UUID primary keys should still map to ID
if !strings.Contains(output, "id: ID!") {
t.Error("Expected 'id: ID!' for UUID primary key")
}
}
func TestWriter_Metadata_NoScalarDeclarations(t *testing.T) {
schema := models.InitSchema("public")
table := models.InitTable("Event", "public")
idCol := models.InitColumn("id", "Event", "public")
idCol.Type = "bigint"
idCol.IsPrimaryKey = true
table.Columns["id"] = idCol
createdAtCol := models.InitColumn("createdAt", "Event", "public")
createdAtCol.Type = "timestamp"
createdAtCol.NotNull = true
table.Columns["createdAt"] = createdAtCol
schema.Tables = []*models.Table{table}
db := models.InitDatabase("test")
db.Schemas = []*models.Schema{schema}
opts := &writers.WriterOptions{
Metadata: map[string]any{
"includeScalarDeclarations": false,
},
}
writer := NewWriter(opts)
output := writer.databaseToGraphQL(db)
// Verify no scalar declarations
if strings.Contains(output, "scalar DateTime") {
t.Error("Did not expect 'scalar DateTime' with includeScalarDeclarations=false")
}
// But field should still use DateTime
if !strings.Contains(output, "createdAt: DateTime!") {
t.Error("Expected 'createdAt: DateTime!' in output")
}
}

277
pkg/writers/json/README.md Normal file

@@ -0,0 +1,277 @@
# JSON Writer
Generates database schema definitions in JSON format.
## Overview
The JSON Writer converts RelSpec's internal database model representation into JSON format, providing a complete, structured representation of the database schema.
## Features
- Generates RelSpec's canonical JSON schema format
- Complete schema representation including:
- Databases and schemas
- Tables, columns, and data types
- Constraints (PK, FK, unique, check)
- Indexes
- Relationships
- Views and sequences
- Pretty-printed, human-readable output
- Suitable for version control
- Ideal interchange format
## Usage
### Basic Example
```go
package main
import (
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
"git.warky.dev/wdevs/relspecgo/pkg/writers/json"
)
func main() {
options := &writers.WriterOptions{
OutputPath: "schema.json",
}
writer := json.NewWriter(options)
err := writer.WriteDatabase(db)
if err != nil {
panic(err)
}
}
```
### CLI Examples
```bash
# Export PostgreSQL database to JSON
relspec --input pgsql \
--conn "postgres://localhost/mydb" \
--output json \
--out-file schema.json
# Convert GORM models to JSON
relspec --input gorm --in-file models.go --output json --out-file schema.json
# Convert DBML to JSON
relspec --input dbml --in-file diagram.dbml --output json --out-file schema.json
```
## Generated JSON Example
```json
{
"name": "myapp",
"description": "",
"database_type": "postgresql",
"database_version": "",
"source_format": "pgsql",
"schemas": [
{
"name": "public",
"description": "",
"tables": [
{
"name": "users",
"schema": "public",
"description": "",
"columns": {
"id": {
"name": "id",
"table": "users",
"schema": "public",
"type": "bigint",
"length": 0,
"precision": 0,
"scale": 0,
"not_null": true,
"is_primary_key": true,
"auto_increment": true,
"default": "",
"sequence": 1
},
"username": {
"name": "username",
"table": "users",
"schema": "public",
"type": "varchar",
"length": 50,
"not_null": true,
"is_primary_key": false,
"auto_increment": false,
"sequence": 2
},
"email": {
"name": "email",
"table": "users",
"schema": "public",
"type": "varchar",
"length": 100,
"not_null": true,
"sequence": 3
}
},
"constraints": {
"pk_users": {
"name": "pk_users",
"type": "PRIMARY KEY",
"table": "users",
"schema": "public",
"columns": ["id"]
},
"uq_users_username": {
"name": "uq_users_username",
"type": "UNIQUE",
"table": "users",
"schema": "public",
"columns": ["username"]
}
},
"indexes": {
"idx_users_email": {
"name": "idx_users_email",
"table": "users",
"schema": "public",
"columns": ["email"],
"unique": false,
"type": "btree"
}
},
"relationships": {}
},
{
"name": "posts",
"schema": "public",
"columns": {
"id": {
"name": "id",
"type": "bigint",
"not_null": true,
"is_primary_key": true,
"sequence": 1
},
"user_id": {
"name": "user_id",
"type": "bigint",
"not_null": true,
"sequence": 2
},
"title": {
"name": "title",
"type": "varchar",
"length": 200,
"not_null": true,
"sequence": 3
},
"content": {
"name": "content",
"type": "text",
"not_null": false,
"sequence": 4
}
},
"constraints": {
"fk_posts_user_id": {
"name": "fk_posts_user_id",
"type": "FOREIGN KEY",
"table": "posts",
"schema": "public",
"columns": ["user_id"],
"referenced_table": "users",
"referenced_schema": "public",
"referenced_columns": ["id"],
"on_delete": "CASCADE",
"on_update": "NO ACTION"
}
},
"indexes": {
"idx_posts_user_id": {
"name": "idx_posts_user_id",
"columns": ["user_id"],
"unique": false,
"type": "btree"
}
}
}
],
"views": [],
"sequences": []
}
]
}
```
## Schema Structure
The JSON format includes:
### Database Level
- `name` - Database name
- `description` - Database description
- `database_type` - Database system type
- `database_version` - Version information
- `source_format` - Original source format
- `schemas` - Array of schema objects
### Schema Level
- `name` - Schema name
- `description` - Schema description
- `tables` - Array of table objects
- `views` - Array of view objects
- `sequences` - Array of sequence objects
### Table Level
- `name` - Table name
- `schema` - Schema name
- `description` - Table description
- `columns` - Map of column objects
- `constraints` - Map of constraint objects
- `indexes` - Map of index objects
- `relationships` - Map of relationship objects
### Column Level
- `name` - Column name
- `type` - Data type
- `length` - Type length
- `precision`, `scale` - Numeric precision
- `not_null` - NOT NULL flag
- `is_primary_key` - Primary key flag
- `auto_increment` - Auto-increment flag
- `default` - Default value
- `sequence` - Column order
### Constraint Level
- `name` - Constraint name
- `type` - Constraint type (PRIMARY KEY, FOREIGN KEY, UNIQUE, CHECK)
- `columns` - Constrained columns
- `referenced_table`, `referenced_schema` - FK references
- `referenced_columns` - Referenced columns
- `on_delete`, `on_update` - FK actions
### Index Level
- `name` - Index name
- `columns` - Indexed columns
- `unique` - Unique flag
- `type` - Index type
## Use Cases
- **Version Control** - Track schema changes in git
- **Documentation** - Human-readable schema documentation
- **Interchange** - Standard format for tool integration
- **Backup** - Schema backup without database access
- **Testing** - Test data for schema validation
- **API** - Schema information for APIs
## Notes
- Output is pretty-printed with 2-space indentation
- Preserves all schema metadata
- Can be round-tripped (read and write) without loss; output is deterministic (see the sketch below)
- Schema-agnostic format
- Ideal for automation and tooling
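A full read-back requires the JSON reader, but determinism — the property that makes the format diff-friendly — can be checked directly. A minimal sketch using only APIs shown above:
```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
	"git.warky.dev/wdevs/relspecgo/pkg/writers"
	jsonwriter "git.warky.dev/wdevs/relspecgo/pkg/writers/json"
)

// writeTo exports db as JSON to the given path.
func writeTo(db *models.Database, path string) error {
	opts := &writers.WriterOptions{OutputPath: path}
	return jsonwriter.NewWriter(opts).WriteDatabase(db)
}

func main() {
	db := models.InitDatabase("demo") // normally populated by a reader

	if err := writeTo(db, "a.json"); err != nil {
		panic(err)
	}
	if err := writeTo(db, "b.json"); err != nil {
		panic(err)
	}

	a, _ := os.ReadFile("a.json")
	b, _ := os.ReadFile("b.json")
	fmt.Println("deterministic:", bytes.Equal(a, b))
}
```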

195
pkg/writers/pgsql/README.md Normal file

@@ -0,0 +1,195 @@
# PostgreSQL Writer
Generates PostgreSQL DDL (Data Definition Language) SQL scripts from database schema information.
## Overview
The PostgreSQL Writer converts RelSpec's internal database model representation into PostgreSQL-compatible SQL DDL scripts, including CREATE TABLE statements, constraints, indexes, views, and sequences.
## Features
- Generates complete PostgreSQL DDL
- Creates schemas, tables, columns
- Defines constraints (PK, FK, unique, check)
- Creates indexes
- Generates views and sequences
- Supports migration scripts
- Includes audit triggers (optional)
- Handles PostgreSQL-specific data types
## Usage
### Basic Example
```go
package main
import (
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
"git.warky.dev/wdevs/relspecgo/pkg/writers/pgsql"
)
func main() {
options := &writers.WriterOptions{
OutputPath: "schema.sql",
}
writer := pgsql.NewWriter(options)
err := writer.WriteDatabase(db)
if err != nil {
panic(err)
}
}
```
### CLI Examples
```bash
# Generate PostgreSQL DDL from JSON schema
relspec --input json \
--in-file schema.json \
--output pgsql \
--out-file schema.sql
# Convert GORM models to PostgreSQL DDL
relspec --input gorm \
--in-file models.go \
--output pgsql \
--out-file create_tables.sql
# Export live database schema to SQL
relspec --input pgsql \
--conn "postgres://localhost/source_db" \
--output pgsql \
--out-file backup_schema.sql
```
## Generated SQL Example
```sql
-- Schema: public
CREATE SCHEMA IF NOT EXISTS public;
-- Table: public.users
CREATE TABLE IF NOT EXISTS public.users (
id BIGSERIAL PRIMARY KEY,
username VARCHAR(50) NOT NULL,
email VARCHAR(100) NOT NULL,
bio TEXT,
created_at TIMESTAMP NOT NULL DEFAULT NOW()
);
-- Constraints for public.users
ALTER TABLE public.users
ADD CONSTRAINT uq_users_username UNIQUE (username);
-- Indexes for public.users
CREATE INDEX idx_users_email ON public.users (email);
-- Table: public.posts
CREATE TABLE IF NOT EXISTS public.posts (
id BIGSERIAL PRIMARY KEY,
user_id BIGINT NOT NULL,
title VARCHAR(200) NOT NULL,
content TEXT,
created_at TIMESTAMP DEFAULT NOW()
);
-- Foreign Keys for public.posts
ALTER TABLE public.posts
ADD CONSTRAINT fk_posts_user_id
FOREIGN KEY (user_id)
REFERENCES public.users (id)
ON DELETE CASCADE
ON UPDATE NO ACTION;
-- Indexes for public.posts
CREATE INDEX idx_posts_user_id ON public.posts (user_id);
```
## Writer Options
### Metadata Options
```go
options := &writers.WriterOptions{
OutputPath: "schema.sql",
Metadata: map[string]interface{}{
"include_drop": true, // Include DROP statements
"include_audit": true, // Include audit triggers
"if_not_exists": true, // Use IF NOT EXISTS
"migration_mode": false, // Generate migration script
},
}
```
## Feature Details
### Full DDL Generation
Generates complete database structure:
- CREATE SCHEMA statements
- CREATE TABLE with all columns and types
- PRIMARY KEY constraints
- FOREIGN KEY constraints with actions
- UNIQUE constraints
- CHECK constraints
- CREATE INDEX statements
- CREATE VIEW statements
- CREATE SEQUENCE statements
### Migration Mode
When `migration_mode` is enabled, generates migration scripts with:
- Version tracking
- Up/down migrations
- Transactional DDL
- Rollback support
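For example, a migration-style script can be requested through the metadata keys documented above (a sketch; `db` is a `*models.Database` from a reader):
```go
opts := &writers.WriterOptions{
    OutputPath: "migrations/001_init.sql",
    Metadata: map[string]interface{}{
        "migration_mode": true,
        "include_drop":   true, // emit DROP statements to support rollback
    },
}
writer := pgsql.NewWriter(opts)
if err := writer.WriteDatabase(db); err != nil {
    panic(err)
}
```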
### Audit Triggers
When `include_audit` is enabled, adds:
- Created/updated timestamp triggers
- Audit logging functionality
- Change tracking
## PostgreSQL-Specific Features
- Serial types (SERIAL, BIGSERIAL)
- Advanced types (UUID, JSONB, ARRAY)
- Schema-qualified names
- Constraint actions (CASCADE, RESTRICT, SET NULL)
- Partial indexes
- Function-based indexes
- Check constraints with expressions
## Data Types
Supports all PostgreSQL data types:
- Integer types: SMALLINT, INTEGER, BIGINT, SERIAL, BIGSERIAL
- Numeric types: NUMERIC, DECIMAL, REAL, DOUBLE PRECISION
- String types: VARCHAR, CHAR, TEXT
- Date/Time: DATE, TIME, TIMESTAMP, TIMESTAMPTZ, INTERVAL
- Boolean: BOOLEAN
- Binary: BYTEA
- JSON: JSON, JSONB
- UUID: UUID
- Network: INET, CIDR, MACADDR
- Special: ARRAY, HSTORE
## Notes
- Generated SQL is formatted and readable
- Comments are preserved from source schema
- Schema names are fully qualified
- Default values are properly quoted
- Constraint names follow PostgreSQL conventions
- Compatible with PostgreSQL 12+


@@ -0,0 +1,135 @@
# Prisma Writer
Generates Prisma schema files from database schema information.
## Overview
The Prisma Writer converts RelSpec's internal database model representation into Prisma schema language (`.prisma` files), complete with models, fields, relationships, and attributes.
## Features
- Generates Prisma schema syntax
- Creates model definitions with proper field types
- Adds Prisma attributes (@id, @unique, @default, etc.)
- Generates relationship fields
- Includes datasource and generator configurations
- Maps table/column names with @map and @@map
## Usage
### Basic Example
```go
package main
import (
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
"git.warky.dev/wdevs/relspecgo/pkg/writers/prisma"
)
func main() {
options := &writers.WriterOptions{
OutputPath: "schema.prisma",
Metadata: map[string]interface{}{
"datasource_provider": "postgresql",
},
}
writer := prisma.NewWriter(options)
err := writer.WriteDatabase(db)
if err != nil {
panic(err)
}
}
```
### CLI Examples
```bash
# Generate Prisma schema from PostgreSQL database
relspec --input pgsql \
--conn "postgres://localhost/mydb" \
--output prisma \
--out-file schema.prisma
# Convert GORM models to Prisma
relspec --input gorm --in-file models.go --output prisma --out-file schema.prisma
# Convert JSON to Prisma schema
relspec --input json --in-file database.json --output prisma --out-file prisma/schema.prisma
```
## Generated Code Example
```prisma
datasource db {
provider = "postgresql"
url = env("DATABASE_URL")
}
generator client {
provider = "prisma-client-js"
}
model User {
id Int @id @default(autoincrement())
username String @unique @db.VarChar(50)
email String @db.VarChar(100)
bio String? @db.Text
createdAt DateTime @default(now()) @map("created_at")
posts Post[]
@@map("users")
}
model Post {
id Int @id @default(autoincrement())
userId Int @map("user_id")
title String @db.VarChar(200)
content String? @db.Text
user User @relation(fields: [userId], references: [id], onDelete: Cascade)
@@map("posts")
}
```
## Supported Prisma Attributes
### Field Attributes
- `@id` - Primary key
- `@unique` - Unique constraint
- `@default()` - Default value
- `@map()` - Column name mapping
- `@db.*` - Database-specific types
- `@relation()` - Relationship definition
### Model Attributes
- `@@map()` - Table name mapping
- `@@unique()` - Composite unique constraints
- `@@index()` - Index definitions
- `@@id()` - Composite primary keys
## Type Mapping
| SQL Type | Prisma Type | Database Type |
|----------|-------------|---------------|
| bigint | BigInt | @db.BigInt |
| integer | Int | - |
| varchar(n) | String | @db.VarChar(n) |
| text | String | @db.Text |
| boolean | Boolean | - |
| timestamp | DateTime | @db.Timestamp |
| uuid | String | @db.Uuid |
| json | Json | - |
## Notes
- Model names are PascalCase (e.g., `User`, `Post`)
- Field names are camelCase with `@map` for snake_case columns
- Table names use `@@map` when different from model name
- Nullable fields are marked with `?`
- Relationship fields are automatically generated
- Datasource provider defaults to `postgresql`


@@ -0,0 +1,169 @@
# TypeORM Writer
Generates TypeScript files with TypeORM entity definitions from database schema information.
## Overview
The TypeORM Writer converts RelSpec's internal database model representation into TypeScript source code with TypeORM entity classes, including proper decorators, relationships, and column configurations.
## Features
- Generates TypeORM-compatible TypeScript entities
- Creates proper decorator usage (@Entity, @Column, etc.)
- Adds relationship decorators (@OneToMany, @ManyToOne, @JoinColumn)
- Handles column types and options
- Supports constraints and indexes
- Outputs formatted TypeScript code
## Usage
### Basic Example
```go
package main
import (
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
"git.warky.dev/wdevs/relspecgo/pkg/writers/typeorm"
)
func main() {
options := &writers.WriterOptions{
OutputPath: "entities/",
}
writer := typeorm.NewWriter(options)
err := writer.WriteDatabase(db)
if err != nil {
panic(err)
}
}
```
### CLI Examples
```bash
# Generate TypeORM entities from PostgreSQL database
relspec --input pgsql \
--conn "postgres://localhost/mydb" \
--output typeorm \
--out-file entities/
# Convert GORM models to TypeORM
relspec --input gorm --in-file models.go --output typeorm --out-file src/entities/
# Convert JSON to TypeORM entities
relspec --input json --in-file schema.json --output typeorm --out-file entities/
```
## Generated Code Example
```typescript
import {
Entity,
PrimaryGeneratedColumn,
Column,
CreateDateColumn,
OneToMany,
ManyToOne,
JoinColumn,
Index,
} from 'typeorm';
import { Post } from './Post';
@Entity('users')
export class User {
@PrimaryGeneratedColumn('increment')
id: number;
@Column({ type: 'varchar', length: 50, unique: true })
@Index()
username: string;
@Column({ type: 'varchar', length: 100 })
email: string;
@Column({ type: 'text', nullable: true })
bio: string | null;
@CreateDateColumn({ name: 'created_at' })
createdAt: Date;
@OneToMany(() => Post, (post) => post.user)
posts: Post[];
}
@Entity('posts')
export class Post {
@PrimaryGeneratedColumn('increment')
id: number;
@Column({ name: 'user_id' })
userId: number;
@Column({ type: 'varchar', length: 200 })
title: string;
@Column({ type: 'text', nullable: true })
content: string | null;
@ManyToOne(() => User, (user) => user.posts, { onDelete: 'CASCADE' })
@JoinColumn({ name: 'user_id' })
user: User;
}
```
## Supported TypeORM Decorators
### Entity Decorators
- `@Entity()` - Define entity/table
- `@PrimaryGeneratedColumn()` - Auto-increment primary key
- `@PrimaryColumn()` - Primary key
- `@Column()` - Column definition
- `@CreateDateColumn()` - Auto-set creation timestamp
- `@UpdateDateColumn()` - Auto-update timestamp
### Relationship Decorators
- `@OneToMany()` - One-to-many relationship
- `@ManyToOne()` - Many-to-one relationship
- `@JoinColumn()` - Foreign key column specification
### Constraint Decorators
- `@Index()` - Create index
- `@Unique()` - Unique constraint
## Column Options
```typescript
@Column({
type: 'varchar', // Column type
length: 255, // Length for varchar/char
nullable: true, // Allow NULL
unique: true, // Unique constraint
default: 'value', // Default value
name: 'column_name', // Database column name
})
```
## Type Mapping
| SQL Type | TypeScript Type | TypeORM Type |
|----------|-----------------|--------------|
| bigint | number | 'bigint' |
| integer | number | 'int' |
| varchar | string | 'varchar' |
| text | string | 'text' |
| boolean | boolean | 'boolean' |
| timestamp | Date | 'timestamp' |
| json | object | 'json' |
| uuid | string | 'uuid' |
## Notes
- Entity class names are PascalCase
- One file per entity (named after the entity)
- Relationship imports are auto-generated
- Nullable columns use TypeScript union with `null`
- Foreign key actions (CASCADE, etc.) are included
- Schema names can be specified in `@Entity()` decorator

212
pkg/writers/yaml/README.md Normal file

@@ -0,0 +1,212 @@
# YAML Writer
Generates database schema definitions in YAML format.
## Overview
The YAML Writer converts RelSpec's internal database model representation into YAML format, providing a human-readable, structured representation of the database schema.
## Features
- Generates RelSpec's canonical YAML schema format
- Human-readable alternative to JSON
- Complete schema representation including:
- Databases and schemas
- Tables, columns, and data types
- Constraints (PK, FK, unique, check)
- Indexes
- Relationships
- Views and sequences
- Supports comments
- Ideal for manual editing and configuration
## Usage
### Basic Example
```go
package main
import (
"git.warky.dev/wdevs/relspecgo/pkg/models"
"git.warky.dev/wdevs/relspecgo/pkg/writers"
"git.warky.dev/wdevs/relspecgo/pkg/writers/yaml"
)
func main() {
options := &writers.WriterOptions{
OutputPath: "schema.yaml",
}
writer := yaml.NewWriter(options)
err := writer.WriteDatabase(db)
if err != nil {
panic(err)
}
}
```
### CLI Examples
```bash
# Export PostgreSQL database to YAML
relspec --input pgsql \
--conn "postgres://localhost/mydb" \
--output yaml \
--out-file schema.yaml
# Convert GORM models to YAML
relspec --input gorm --in-file models.go --output yaml --out-file schema.yaml
# Convert JSON to YAML
relspec --input json --in-file schema.json --output yaml --out-file schema.yaml
```
## Generated YAML Example
```yaml
name: myapp
database_type: postgresql
source_format: pgsql
schemas:
- name: public
tables:
- name: users
schema: public
columns:
id:
name: id
table: users
schema: public
type: bigint
not_null: true
is_primary_key: true
auto_increment: true
sequence: 1
username:
name: username
table: users
schema: public
type: varchar
length: 50
not_null: true
sequence: 2
email:
name: email
table: users
schema: public
type: varchar
length: 100
not_null: true
sequence: 3
constraints:
pk_users:
name: pk_users
type: PRIMARY KEY
table: users
schema: public
columns:
- id
uq_users_username:
name: uq_users_username
type: UNIQUE
table: users
schema: public
columns:
- username
indexes:
idx_users_email:
name: idx_users_email
table: users
schema: public
columns:
- email
unique: false
type: btree
- name: posts
schema: public
columns:
id:
name: id
type: bigint
not_null: true
is_primary_key: true
sequence: 1
user_id:
name: user_id
type: bigint
not_null: true
sequence: 2
title:
name: title
type: varchar
length: 200
not_null: true
sequence: 3
content:
name: content
type: text
not_null: false
sequence: 4
constraints:
fk_posts_user_id:
name: fk_posts_user_id
type: FOREIGN KEY
table: posts
schema: public
columns:
- user_id
referenced_table: users
referenced_schema: public
referenced_columns:
- id
on_delete: CASCADE
on_update: NO ACTION
indexes:
idx_posts_user_id:
name: idx_posts_user_id
columns:
- user_id
unique: false
type: btree
views: []
sequences: []
```
## Schema Structure
The YAML format mirrors the JSON structure with human-readable syntax:
- Database level: `name`, `database_type`, `source_format`, `schemas`
- Schema level: `name`, `tables`, `views`, `sequences`
- Table level: `name`, `schema`, `columns`, `constraints`, `indexes`
- Column level: `name`, `type`, `length`, `not_null`, etc.
- Constraint level: `name`, `type`, `columns`, foreign key details
- Index level: `name`, `columns`, `unique`, `type`
## Advantages Over JSON
- More human-readable
- Easier to edit manually
- Supports comments
- Less verbose (no braces/brackets)
- Better for configuration files
- Natural indentation
## Use Cases
- **Configuration** - Schema as configuration
- **Documentation** - Human-readable schema docs
- **Version Control** - Easier to read diffs
- **Manual Editing** - Easier to modify by hand
- **Code Generation** - Template-friendly format
## Notes
- Output is properly indented (2 spaces)
- Preserves all schema metadata
- Can be round-tripped with YAML reader
- Compatible with YAML 1.2
- More readable than JSON for large schemas
- Ideal for documentation and manual workflows


@@ -0,0 +1,156 @@
// Code generated by relspecgo. DO NOT EDIT.
import { pgTable, pgEnum, integer, bigint, smallint, serial, bigserial, smallserial, text, varchar, char, boolean, numeric, real, doublePrecision, timestamp, date, time, interval, json, jsonb, uuid, bytea } from 'drizzle-orm/pg-core';
import { sql } from 'drizzle-orm';
// Enums
export const userRole = pgEnum('UserRole', ['admin', 'user', 'moderator', 'guest']);
export const orderStatus = pgEnum('OrderStatus', ['pending', 'processing', 'shipped', 'delivered', 'cancelled']);
// Table: users
export const users = pgTable('users', {
id: serial('id').primaryKey(),
createdAt: timestamp('created_at').notNull().default(sql`now()`),
email: varchar('email').notNull().unique(),
isActive: boolean('is_active').notNull().default(true),
lastLoginAt: timestamp('last_login_at'),
passwordHash: varchar('password_hash').notNull(),
profile: jsonb('profile'),
role: userRole('role').notNull(),
updatedAt: timestamp('updated_at').notNull().default(sql`now()`),
username: varchar('username').notNull().unique(),
});
// Types for users
export type Users = typeof users.$inferSelect;
export type NewUsers = typeof users.$inferInsert;
// Table: profiles
export const profiles = pgTable('profiles', {
id: serial('id').primaryKey(),
avatarUrl: varchar('avatar_url'),
bio: text('bio'),
createdAt: timestamp('created_at').notNull().default(sql`now()`),
dateOfBirth: date('date_of_birth'),
firstName: varchar('first_name'),
lastName: varchar('last_name'),
phoneNumber: varchar('phone_number'),
updatedAt: timestamp('updated_at').notNull().default(sql`now()`),
userId: integer('user_id').notNull().unique().references(() => users.id),
});
// Types for profiles
export type Profiles = typeof profiles.$inferSelect;
export type NewProfiles = typeof profiles.$inferInsert;
// Table: posts
export const posts = pgTable('posts', {
id: serial('id').primaryKey(),
authorId: integer('author_id').notNull().references(() => users.id),
content: text('content').notNull(),
createdAt: timestamp('created_at').notNull().default(sql`now()`),
excerpt: text('excerpt'),
featuredImage: varchar('featured_image'),
isPublished: boolean('is_published').notNull().default(false),
publishedAt: timestamp('published_at'),
slug: varchar('slug').notNull().unique(),
title: varchar('title').notNull(),
updatedAt: timestamp('updated_at').notNull().default(sql`now()`),
viewCount: integer('view_count').notNull().default(0),
});
// Types for posts
export type Posts = typeof posts.$inferSelect;
export type NewPosts = typeof posts.$inferInsert;
// Table: comments
export const comments = pgTable('comments', {
id: serial('id').primaryKey(),
authorId: integer('author_id').notNull().references(() => users.id),
content: text('content').notNull(),
createdAt: timestamp('created_at').notNull().default(sql`now()`),
isApproved: boolean('is_approved').notNull().default(false),
parentId: integer('parent_id').references(() => comments.id),
postId: integer('post_id').notNull().references(() => posts.id),
updatedAt: timestamp('updated_at').notNull().default(sql`now()`),
});
// Types for comments
export type Comments = typeof comments.$inferSelect;
export type NewComments = typeof comments.$inferInsert;
// Table: categories
export const categories = pgTable('categories', {
id: serial('id').primaryKey(),
createdAt: timestamp('created_at').notNull().default(sql`now()`),
description: text('description'),
name: varchar('name').notNull().unique(),
parentId: integer('parent_id').references(() => categories.id),
slug: varchar('slug').notNull().unique(),
updatedAt: timestamp('updated_at').notNull().default(sql`now()`),
});
// Types for categories
export type Categories = typeof categories.$inferSelect;
export type NewCategories = typeof categories.$inferInsert;
// Table: post_categories
export const postCategories = pgTable('post_categories', {
categoryId: integer('category_id').notNull().references(() => categories.id),
createdAt: timestamp('created_at').notNull().default(sql`now()`),
postId: integer('post_id').notNull().references(() => posts.id),
});
// Types for post_categories
export type PostCategories = typeof postCategories.$inferSelect;
export type NewPostCategories = typeof postCategories.$inferInsert;
// Table: tags
export const tags = pgTable('tags', {
id: serial('id').primaryKey(),
createdAt: timestamp('created_at').notNull().default(sql`now()`),
name: varchar('name').notNull().unique(),
slug: varchar('slug').notNull().unique(),
});
// Types for tags
export type Tags = typeof tags.$inferSelect;
export type NewTags = typeof tags.$inferInsert;
// Table: post_tags
export const postTags = pgTable('post_tags', {
createdAt: timestamp('created_at').notNull().default(sql`now()`),
postId: integer('post_id').notNull().references(() => posts.id),
tagId: integer('tag_id').notNull().references(() => tags.id),
});
// Types for post_tags
export type PostTags = typeof postTags.$inferSelect;
export type NewPostTags = typeof postTags.$inferInsert;
// Table: orders
export const orders = pgTable('orders', {
id: serial('id').primaryKey(),
billingAddress: jsonb('billing_address').notNull(),
completedAt: timestamp('completed_at'),
createdAt: timestamp('created_at').notNull().default(sql`now()`),
currency: varchar('currency').notNull().default('USD'),
notes: text('notes'),
orderNumber: varchar('order_number').notNull().unique(),
shippingAddress: jsonb('shipping_address').notNull(),
status: orderStatus('status').notNull().default('pending'),
totalAmount: numeric('total_amount').notNull(),
updatedAt: timestamp('updated_at').notNull().default(sql`now()`),
userId: integer('user_id').notNull().references(() => users.id),
});
// Types for orders
export type Orders = typeof orders.$inferSelect;
export type NewOrders = typeof orders.$inferInsert;
// Table: sessions
export const sessions = pgTable('sessions', {
id: uuid('id').primaryKey().default(sql`gen_random_uuid()`),
createdAt: timestamp('created_at').notNull().default(sql`now()`),
expiresAt: timestamp('expires_at').notNull(),
ipAddress: varchar('ip_address'),
token: varchar('token').notNull().unique(),
userAgent: text('user_agent'),
userId: integer('user_id').notNull().references(() => users.id),
});
// Types for sessions
export type Sessions = typeof sessions.$inferSelect;
export type NewSessions = typeof sessions.$inferInsert;


@@ -0,0 +1,90 @@
// Code generated by relspecgo. DO NOT EDIT.
import { pgTable, pgEnum, primaryKey, integer, bigint, smallint, serial, bigserial, smallserial, text, varchar, char, boolean, numeric, real, doublePrecision, timestamp, date, time, interval, json, jsonb, uuid, bytea } from 'drizzle-orm/pg-core';
import { sql } from 'drizzle-orm';
// Enums
export const role = pgEnum('Role', ['USER', 'ADMIN']);
export type Role = 'USER' | 'ADMIN';
// Table: User
export interface User {
id: number;
email: string;
name: string | null;
profile: string | null;
role: Role;
}
export const user = pgTable('User', {
id: integer('id').primaryKey().generatedAlwaysAsIdentity(),
email: text('email').notNull().unique(),
name: text('name'),
profile: text('profile'),
role: role('role').notNull().default('USER'),
});
export type NewUser = typeof user.$inferInsert;
// Table: Profile
export interface Profile {
id: number;
bio: string;
user: string;
userId: number;
}
export const profile = pgTable('Profile', {
id: integer('id').primaryKey().generatedAlwaysAsIdentity(),
bio: text('bio').notNull(),
user: text('user').notNull(),
userId: integer('userId').notNull().unique().references(() => user.id),
});
export type NewProfile = typeof profile.$inferInsert;
// Table: Post
export interface Post {
id: number;
author: string;
authorId: number;
createdAt: Date;
published: boolean;
title: string;
updatedAt: Date; // @updatedAt
}
export const post = pgTable('Post', {
id: integer('id').primaryKey().generatedAlwaysAsIdentity(),
author: text('author').notNull(),
authorId: integer('authorId').notNull().references(() => user.id),
createdAt: timestamp('createdAt').notNull().default(sql`now()`),
published: boolean('published').notNull().default(false),
title: text('title').notNull(),
updatedAt: timestamp('updatedAt').notNull(), // @updatedAt
});
export type NewPost = typeof post.$inferInsert;
// Table: Category
export interface Category {
id: number;
name: string;
}
export const category = pgTable('Category', {
id: integer('id').primaryKey().generatedAlwaysAsIdentity(),
name: text('name').notNull(),
});
export type NewCategory = typeof category.$inferInsert;
// Table: _CategoryToPost
export interface Categorytopost {
categoryId: number;
postId: number;
}
export const Categorytopost = pgTable('_CategoryToPost', {
// Composite primary key is declared at table level; calling .primaryKey()
// on both columns would emit two conflicting PRIMARY KEY constraints.
categoryId: integer('CategoryId').notNull().references(() => category.id),
postId: integer('PostId').notNull().references(() => post.id),
}, (table) => [primaryKey({ columns: [table.categoryId, table.postId] })]);
export type NewCategorytopost = typeof Categorytopost.$inferInsert;


@@ -0,0 +1,46 @@
# Complex GraphQL schema with multiple features
scalar DateTime
scalar JSON
scalar Date
enum Role {
USER
ADMIN
MODERATOR
}
type User {
id: ID!
email: String!
name: String!
role: Role!
createdAt: DateTime!
posts: [Post!]!
profile: Profile
}
type Profile {
id: ID!
bio: String
avatar: String
metadata: JSON
user: User!
}
type Post {
id: ID!
title: String!
slug: String!
content: String
published: Boolean!
publishedAt: Date
author: User!
tags: [Tag!]!
}
type Tag {
id: ID!
name: String!
posts: [Post!]!
}


@@ -0,0 +1,13 @@
# GraphQL schema with custom scalars
scalar DateTime
scalar JSON
scalar Date
type User {
id: ID!
email: String!
createdAt: DateTime!
metadata: JSON
birthDate: Date
}


@@ -0,0 +1,13 @@
# GraphQL schema with enums
enum Role {
ADMIN
USER
GUEST
}
type User {
id: ID!
email: String!
role: Role!
}


@@ -0,0 +1,16 @@
# GraphQL schema with relationships
type User {
id: ID!
email: String!
name: String!
posts: [Post!]!
}
type Post {
id: ID!
title: String!
content: String
published: Boolean!
author: User!
}


@@ -0,0 +1,9 @@
# Simple GraphQL schema for testing basic type parsing
type User {
id: ID!
email: String!
name: String
age: Int
active: Boolean!
}