Compare commits
14 Commits
| SHA1 |
|---|
| 9f29bc112e |
| b55737ab4c |
| 2a271b9859 |
| beb5b4fac8 |
| e61204cb3c |
| d52b9cdc14 |
| f98b278d72 |
| 666eab7cec |
| 35bc9dfb5c |
| aad5db5175 |
| d9225a7310 |
| 79effe6921 |
| 289715ba44 |
| 8ca2b50f9c |
.github/workflows/ci.yml (vendored, 34 lines changed)

@@ -12,7 +12,7 @@ jobs:
    runs-on: ubuntu-latest
    strategy:
      matrix:
-       go-version: ['1.23', '1.24', '1.25']
+       go-version: ['1.24', '1.25']

    steps:
      - name: Checkout code

@@ -34,8 +34,8 @@ jobs:
      - name: Download dependencies
        run: go mod download

-     - name: Run tests
-       run: go test -v -race -coverprofile=coverage.out -covermode=atomic ./...
+     - name: Run unit tests
+       run: make test

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v4

@@ -57,11 +57,13 @@ jobs:
        with:
          go-version: '1.25'

-     - name: golangci-lint
-       uses: golangci/golangci-lint-action@v6
-       with:
-         version: latest
-         args: --config=.golangci.json
+     - name: Install golangci-lint
+       run: |
+         curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin latest
+         echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
+
+     - name: Run linter
+       run: make lint

  build:
    name: Build

@@ -76,8 +78,20 @@ jobs:
        with:
          go-version: '1.25'

-     - name: Build
-       run: go build -v ./cmd/relspec
+     - name: Download dependencies
+       run: go mod download
+
+     - name: Build binary
+       run: make build
+
+     - name: Verify binary exists
+       run: |
+         if [ ! -f build/relspec ]; then
+           echo "Error: Binary not found at build/relspec"
+           exit 1
+         fi
+         echo "Build successful: build/relspec"
+         ls -lh build/relspec

      - name: Check mod tidiness
        run: |
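Both jobs now defer to the Makefile, so the CI checks can be reproduced locally with the same commands. A minimal sketch; the golangci-lint install line is the one the workflow itself runs:

```bash
# Install golangci-lint into $(go env GOPATH)/bin, as the lint job does
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh \
  | sh -s -- -b "$(go env GOPATH)/bin" latest
export PATH="$(go env GOPATH)/bin:$PATH"

make test    # unit tests with -race and coverage (alias for test-unit)
make lint    # golangci-lint with .golangci.json
make build   # produces build/relspec, which the workflow then verifies
```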
.github/workflows/integration-tests.yml (vendored, new file, 91 lines)

@@ -0,0 +1,91 @@
name: Integration Tests

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  integration-tests:
    name: Integration Tests
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.25'

      - name: Cache Go modules
        uses: actions/cache@v4
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Download dependencies
        run: go mod download

      - name: Start PostgreSQL container
        run: |
          docker run -d \
            --name relspec-test-postgres \
            --network host \
            -e POSTGRES_USER=relspec \
            -e POSTGRES_PASSWORD=relspec_test_password \
            -e POSTGRES_DB=relspec_test \
            postgres:16-alpine

      - name: Wait for PostgreSQL to be ready
        run: |
          echo "Waiting for PostgreSQL to start..."
          for i in {1..30}; do
            if docker exec relspec-test-postgres pg_isready -U relspec -d relspec_test > /dev/null 2>&1; then
              echo "PostgreSQL is ready!"
              break
            fi
            echo "Waiting... ($i/30)"
            sleep 1
          done
          sleep 2

      - name: Copy init script into container
        run: |
          docker cp tests/postgres/init.sql relspec-test-postgres:/tmp/init.sql

      - name: Initialize test database
        run: |
          docker exec relspec-test-postgres psql -U relspec -d relspec_test -f /tmp/init.sql

      - name: Verify database setup
        run: |
          echo "Verifying database initialization..."
          docker exec relspec-test-postgres psql -U relspec -d relspec_test -c "
            SELECT
              (SELECT COUNT(*) FROM pg_namespace WHERE nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast') AND nspname NOT LIKE 'pg_%') as schemas,
              (SELECT COUNT(*) FROM pg_tables WHERE schemaname NOT IN ('pg_catalog', 'information_schema')) as tables,
              (SELECT COUNT(*) FROM pg_views WHERE schemaname NOT IN ('pg_catalog', 'information_schema')) as views,
              (SELECT COUNT(*) FROM pg_sequences WHERE schemaname NOT IN ('pg_catalog', 'information_schema')) as sequences;
          "

      - name: Run integration tests
        env:
          RELSPEC_TEST_PG_CONN: postgres://relspec:relspec_test_password@localhost:5432/relspec_test
        run: make test-integration

      - name: Stop PostgreSQL container
        if: always()
        run: |
          docker stop relspec-test-postgres || true
          docker rm relspec-test-postgres || true

      - name: Summary
        if: always()
        run: |
          echo "Integration tests completed."
          echo "PostgreSQL container has been cleaned up."
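To run the same suite outside CI, the workflow above reduces to a short local sequence. This is a sketch assuming Docker is available; the container name, credentials, and `RELSPEC_TEST_PG_CONN` value are copied from the workflow:

```bash
# Start a throwaway PostgreSQL 16 instance with the test credentials from the workflow
docker run -d --name relspec-test-postgres --network host \
  -e POSTGRES_USER=relspec \
  -e POSTGRES_PASSWORD=relspec_test_password \
  -e POSTGRES_DB=relspec_test \
  postgres:16-alpine

# Load the test schema, point the tests at the container, and run them
docker cp tests/postgres/init.sql relspec-test-postgres:/tmp/init.sql
docker exec relspec-test-postgres psql -U relspec -d relspec_test -f /tmp/init.sql
export RELSPEC_TEST_PG_CONN='postgres://relspec:relspec_test_password@localhost:5432/relspec_test'
make test-integration

# Clean up
docker stop relspec-test-postgres && docker rm relspec-test-postgres
```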
.github/workflows/release.yml (vendored, new file, 116 lines)

@@ -0,0 +1,116 @@
name: Release

on:
  push:
    tags:
      - 'v*.*.*'

jobs:
  build-and-release:
    name: Build and Release
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.25'

      - name: Get version from tag
        id: get_version
        run: |
          echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
          echo "Version: ${GITHUB_REF#refs/tags/}"

      - name: Build binaries for multiple platforms
        run: |
          mkdir -p dist

          # Linux AMD64
          GOOS=linux GOARCH=amd64 go build -o dist/relspec-linux-amd64 -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec

          # Linux ARM64
          GOOS=linux GOARCH=arm64 go build -o dist/relspec-linux-arm64 -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec

          # macOS AMD64
          GOOS=darwin GOARCH=amd64 go build -o dist/relspec-darwin-amd64 -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec

          # macOS ARM64 (Apple Silicon)
          GOOS=darwin GOARCH=arm64 go build -o dist/relspec-darwin-arm64 -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec

          # Windows AMD64
          GOOS=windows GOARCH=amd64 go build -o dist/relspec-windows-amd64.exe -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec

          # Create checksums
          cd dist
          sha256sum * > checksums.txt
          cd ..

      - name: Generate release notes
        id: release_notes
        run: |
          # Get the previous tag
          previous_tag=$(git describe --tags --abbrev=0 HEAD^ 2>/dev/null || echo "")

          if [ -z "$previous_tag" ]; then
            # No previous tag, get all commits
            commits=$(git log --pretty=format:"- %s (%h)" --no-merges)
          else
            # Get commits since the previous tag
            commits=$(git log "${previous_tag}..HEAD" --pretty=format:"- %s (%h)" --no-merges)
          fi

          # Create release notes
          cat > release_notes.md << EOF
          # Release ${{ steps.get_version.outputs.VERSION }}

          ## Changes

          ${commits}

          ## Installation

          Download the appropriate binary for your platform:

          - **Linux (AMD64)**: \`relspec-linux-amd64\`
          - **Linux (ARM64)**: \`relspec-linux-arm64\`
          - **macOS (Intel)**: \`relspec-darwin-amd64\`
          - **macOS (Apple Silicon)**: \`relspec-darwin-arm64\`
          - **Windows (AMD64)**: \`relspec-windows-amd64.exe\`

          Make the binary executable (Linux/macOS):
          \`\`\`bash
          chmod +x relspec-*
          \`\`\`

          Verify the download with the provided checksums.
          EOF

      - name: Create Release
        uses: softprops/action-gh-release@v1
        with:
          body_path: release_notes.md
          files: |
            dist/relspec-linux-amd64
            dist/relspec-linux-arm64
            dist/relspec-darwin-amd64
            dist/relspec-darwin-arm64
            dist/relspec-windows-amd64.exe
            dist/checksums.txt
          draft: false
          prerelease: false
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Summary
        run: |
          echo "Release ${{ steps.get_version.outputs.VERSION }} created successfully!"
          echo "Binaries built for:"
          echo "  - Linux (amd64, arm64)"
          echo "  - macOS (amd64, arm64)"
          echo "  - Windows (amd64)"
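The workflow builds five binaries with `-ldflags "-X main.version=<tag>"` (which only takes effect if `./cmd/relspec` declares a package-level `version` string in package `main`; that variable is assumed here, not shown in this changeset) and ships them with a `checksums.txt` written by `sha256sum`. A sketch of verifying a downloaded Linux binary, following the release notes generated above:

```bash
# After downloading relspec-linux-amd64 and checksums.txt from the release page:
grep relspec-linux-amd64 checksums.txt | sha256sum -c -   # checksums.txt was produced by `sha256sum *` in dist/
chmod +x relspec-linux-amd64                               # the "Make the binary executable" step from the release notes
./relspec-linux-amd64 --help
```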
AI_USE.md (new file, 35 lines)

@@ -0,0 +1,35 @@
# AI Usage Declaration

This Go project utilizes AI tools for the following purposes:

- Generating and improving documentation
- Writing and enhancing tests
- Refactoring and optimizing existing code

AI is **not** used for core design or architecture decisions.
All design decisions are deferred to human discussion.
AI is employed only for enhancements to human-written code.

We are aware of significant AI hallucinations; all AI-generated content is to be reviewed and verified by humans.

          .-""""""-.
        .'          '.
       /   O      O   \
      :           `    :
      |                |
      :    .------.    :
       \  '        '  /
        '.          .'
          '-......-'
         MEGAMIND AI
        [============]

          ___________
         /___________\
        /_____________\
        |  ASSIMILATE |
        |  RESISTANCE |
        |  IS FUTILE  |
        \_____________/
         \___________/
Makefile (81 lines changed)

@@ -1,4 +1,4 @@
-.PHONY: all build test lint coverage clean install help docker-up docker-down docker-test docker-test-integration
+.PHONY: all build test test-unit test-integration lint coverage clean install help docker-up docker-down docker-test docker-test-integration release release-version

# Binary name
BINARY_NAME=relspec

@@ -22,9 +22,23 @@ build: ## Build the binary
    $(GOBUILD) -o $(BUILD_DIR)/$(BINARY_NAME) ./cmd/relspec
    @echo "Build complete: $(BUILD_DIR)/$(BINARY_NAME)"

-test: ## Run tests
-   @echo "Running tests..."
-   $(GOTEST) -v -race -coverprofile=coverage.out ./...
+test: test-unit ## Run all unit tests (alias for test-unit)
+
+test-unit: ## Run unit tests (excludes integration tests)
+   @echo "Running unit tests..."
+   $(GOTEST) -v -race -coverprofile=coverage.out -covermode=atomic $$(go list ./... | grep -v '/tests/integration' | grep -v '/tests/assets' | grep -v '/pkg/readers/pgsql')
+
+test-integration: ## Run integration tests (requires RELSPEC_TEST_PG_CONN environment variable)
+   @echo "Running integration tests..."
+   @if [ -z "$$RELSPEC_TEST_PG_CONN" ]; then \
+       echo "Error: RELSPEC_TEST_PG_CONN environment variable is not set"; \
+       echo "Example: export RELSPEC_TEST_PG_CONN='postgres://relspec:relspec_test_password@localhost:5432/relspec_test'"; \
+       exit 1; \
+   fi
+   @echo "Running PostgreSQL reader tests..."
+   $(GOTEST) -v -count=1 ./pkg/readers/pgsql/
+   @echo "Running general integration tests..."
+   $(GOTEST) -v -count=1 ./tests/integration/

coverage: test ## Run tests with coverage report
    @echo "Generating coverage report..."

@@ -40,6 +54,15 @@ lint: ## Run linter
        exit 1; \
    fi

+lintfix: ## Run linter with automatic fixes
+   @echo "Running linter..."
+   @if command -v golangci-lint > /dev/null; then \
+       golangci-lint run --config=.golangci.json --fix; \
+   else \
+       echo "golangci-lint not installed. Install with: go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest"; \
+       exit 1; \
+   fi
+
clean: ## Clean build artifacts
    @echo "Cleaning..."
    $(GOCLEAN)

@@ -89,5 +112,55 @@ docker-test-integration: docker-up ## Start DB and run integration tests
    $(GOTEST) -v ./pkg/readers/pgsql/ -count=1 || (make docker-down && exit 1)
    @make docker-down

+release: ## Create and push a new release tag (auto-increments patch version)
+   @echo "Creating new release..."
+   @latest_tag=$$(git describe --tags --abbrev=0 2>/dev/null || echo ""); \
+   if [ -z "$$latest_tag" ]; then \
+       version="v1.0.0"; \
+       echo "No existing tags found. Creating first release: $$version"; \
+       commit_logs=$$(git log --pretty=format:"- %s" --no-merges); \
+   else \
+       echo "Latest tag: $$latest_tag"; \
+       version_number=$${latest_tag#v}; \
+       IFS='.' read -r major minor patch <<< "$$version_number"; \
+       patch=$$((patch + 1)); \
+       version="v$$major.$$minor.$$patch"; \
+       echo "Creating new release: $$version"; \
+       commit_logs=$$(git log "$${latest_tag}..HEAD" --pretty=format:"- %s" --no-merges); \
+   fi; \
+   if [ -z "$$commit_logs" ]; then \
+       tag_message="Release $$version"; \
+   else \
+       tag_message="Release $$version\n\n$$commit_logs"; \
+   fi; \
+   git tag -a "$$version" -m "$$tag_message"; \
+   git push origin "$$version"; \
+   echo "Tag $$version created and pushed to remote repository."
+
+release-version: ## Create and push a release with specific version (use: make release-version VERSION=v1.2.3)
+   @if [ -z "$(VERSION)" ]; then \
+       echo "Error: VERSION is required. Usage: make release-version VERSION=v1.2.3"; \
+       exit 1; \
+   fi
+   @version="$(VERSION)"; \
+   if ! echo "$$version" | grep -q "^v"; then \
+       version="v$$version"; \
+   fi; \
+   echo "Creating release: $$version"; \
+   latest_tag=$$(git describe --tags --abbrev=0 2>/dev/null || echo ""); \
+   if [ -z "$$latest_tag" ]; then \
+       commit_logs=$$(git log --pretty=format:"- %s" --no-merges); \
+   else \
+       commit_logs=$$(git log "$${latest_tag}..HEAD" --pretty=format:"- %s" --no-merges); \
+   fi; \
+   if [ -z "$$commit_logs" ]; then \
+       tag_message="Release $$version"; \
+   else \
+       tag_message="Release $$version\n\n$$commit_logs"; \
+   fi; \
+   git tag -a "$$version" -m "$$tag_message"; \
+   git push origin "$$version"; \
+   echo "Tag $$version created and pushed to remote repository."
+
help: ## Display this help screen
    @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'
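A quick sketch of how the new targets are intended to be invoked, based only on the recipes above (the connection string is the example value printed by the target's own error message):

```bash
make test          # alias for test-unit
make test-unit     # race detector + coverage, integration packages excluded

export RELSPEC_TEST_PG_CONN='postgres://relspec:relspec_test_password@localhost:5432/relspec_test'
make test-integration                 # fails fast if RELSPEC_TEST_PG_CONN is unset

make lintfix                          # golangci-lint run --config=.golangci.json --fix

make release                          # auto-increment the patch version, tag, and push
make release-version VERSION=v1.2.3   # explicit version; a leading "v" is added if missing
```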
README.md (60 lines changed)

@@ -16,19 +16,53 @@ RelSpec provides bidirectional conversion and comparison between various databas
## Features

-### Input Formats
-- **XML** - Generic XML schema definitions
-- **JSON** - JSON-based schema specifications
-- **Clarion DCTX** - Clarion database dictionary format
-- **Database Inspection** - Direct database introspection
-- **GORM Models** - Read existing GORM Go structs
-- **Bun Models** - Read existing Bun Go structs
-
-### Output Formats
-- **GORM Models** - Generate GORM-compatible Go structs
-- **Bun Models** - Generate Bun-compatible Go structs
-- **JSON** - Standard JSON schema output
-- **YAML** - Human-readable YAML format
+### Readers (Input Formats)
+
+RelSpec can read database schemas from multiple sources:
+
+#### ORM Models
+- [GORM](pkg/readers/gorm/README.md) - Go GORM model definitions
+- [Bun](pkg/readers/bun/README.md) - Go Bun model definitions
+- [Drizzle](pkg/readers/drizzle/README.md) - TypeScript Drizzle ORM schemas
+- [Prisma](pkg/readers/prisma/README.md) - Prisma schema language
+- [TypeORM](pkg/readers/typeorm/README.md) - TypeScript TypeORM entities
+
+#### Database Inspection
+- [PostgreSQL](pkg/readers/pgsql/README.md) - Direct PostgreSQL database introspection
+
+#### Schema Formats
+- [DBML](pkg/readers/dbml/README.md) - Database Markup Language (dbdiagram.io)
+- [DCTX](pkg/readers/dctx/README.md) - Clarion database dictionary format
+- [DrawDB](pkg/readers/drawdb/README.md) - DrawDB JSON format
+- [GraphQL](pkg/readers/graphql/README.md) - GraphQL Schema Definition Language (SDL)
+- [JSON](pkg/readers/json/README.md) - RelSpec canonical JSON format
+- [YAML](pkg/readers/yaml/README.md) - RelSpec canonical YAML format
+
+### Writers (Output Formats)
+
+RelSpec can write database schemas to multiple formats:
+
+#### ORM Models
+- [GORM](pkg/writers/gorm/README.md) - Generate GORM-compatible Go structs
+- [Bun](pkg/writers/bun/README.md) - Generate Bun-compatible Go structs
+- [Drizzle](pkg/writers/drizzle/README.md) - Generate Drizzle ORM TypeScript schemas
+- [Prisma](pkg/writers/prisma/README.md) - Generate Prisma schema files
+- [TypeORM](pkg/writers/typeorm/README.md) - Generate TypeORM TypeScript entities
+
+#### Database DDL
+- [PostgreSQL](pkg/writers/pgsql/README.md) - PostgreSQL DDL (CREATE TABLE, etc.)
+
+#### Schema Formats
+- [DBML](pkg/writers/dbml/README.md) - Database Markup Language
+- [DCTX](pkg/writers/dctx/README.md) - Clarion database dictionary format
+- [DrawDB](pkg/writers/drawdb/README.md) - DrawDB JSON format
+- [GraphQL](pkg/writers/graphql/README.md) - GraphQL Schema Definition Language (SDL)
+- [JSON](pkg/writers/json/README.md) - RelSpec canonical JSON format
+- [YAML](pkg/writers/yaml/README.md) - RelSpec canonical YAML format
+
+## Use of AI
+
+[Rules and use of AI](./AI_USE.md)

## Installation

@@ -94,7 +128,7 @@ go test ./...
Apache License 2.0 - See [LICENSE](LICENSE) for details.

-Copyright 2025 wdevs
+Copyright 2025 Warky Devs

## Contributing
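With the reader/writer matrix above, a conversion is a single `relspec convert` call. A minimal sketch using the binary produced by `make build` and only flags that appear in this changeset; the connection string, paths, and package name are placeholders:

```bash
# Introspect a live PostgreSQL database and generate GORM structs
./build/relspec convert \
  --from pgsql --from-conn 'postgres://user:password@localhost:5432/mydb' \
  --to gorm --to-path ./internal/models --package models

# Turn a Prisma schema into Drizzle ORM TypeScript definitions
./build/relspec convert \
  --from prisma --from-path ./schema.prisma \
  --to drizzle --to-path ./db/schema.ts
```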
TODO.md (29 lines changed)

@@ -2,22 +2,23 @@


## Input Readers / Writers
-- [x] **Database Inspector**
-- [x] PostgreSQL driver
+- [✔️] **Database Inspector**
+- [✔️] PostgreSQL driver
- [ ] MySQL driver
- [ ] SQLite driver
- [ ] MSSQL driver
-- [x] Foreign key detection
-- [x] Index extraction
-- [ ] .sql file generation with sequence and priority
-- [*] .dbml: Database Markup Language (DBML) for textual schema representation.
-- [ ] Prisma schema support (PSL format) .prisma
-- [ ] Entity Framework (.NET) model .edmx
-- [ ] TypeORM support
-- [ ] .hbm.xml / schema.xml: Hibernate/Propel mappings (Java/PHP)
-- [ ] Django models.py (Python classes), Sequelize migrations (JS)
-- [ ] .avsc: Avro schema (JSON format for data serialization)
+- [✔️] Foreign key detection
+- [✔️] Index extraction
+- [*] .sql file generation with sequence and priority
+- [✔️] .dbml: Database Markup Language (DBML) for textual schema representation.
+- [✔️] Prisma schema support (PSL format) .prisma
+- [✔️] Drizzle ORM support .ts (TypeScript / JavaScript) (Mr. Edd wanted to move from Prisma to Drizzle. If you find bugs, you are welcome to open pull requests or issues)
+- [☠️] Entity Framework (.NET) model .edmx (Fuck no, EDMX files were bloated, verbose XML nightmares: hard to merge, error-prone, and a pain in teams. Microsoft wisely ditched them in EF Core for code-first. Classic overkill from the old MS era.)
+- [✔️] TypeORM support
+- [ ] .hbm.xml / schema.xml: Hibernate/Propel mappings (Java/PHP) (💲 Someone can do this, not me)
+- [ ] Django models.py (Python classes), Sequelize migrations (JS) (💲 Someone can do this, not me)
+- [ ] .avsc: Avro schema (JSON format for data serialization) (💲 Someone can do this, not me)
+- [✔️] GraphQL schema generation


## Documentation

@@ -36,7 +37,7 @@
- [ ] Web UI for visual editing
- [ ] REST API server mode
- [ ] Support for NoSQL databases
-- [ ] GraphQL schema generation
+

## Performance
- [ ] Concurrent processing for multiple tables
@@ -6,26 +6,35 @@ import (
    "strings"
    "time"

+   "github.com/spf13/cobra"
+
    "git.warky.dev/wdevs/relspecgo/pkg/models"
    "git.warky.dev/wdevs/relspecgo/pkg/readers"
    "git.warky.dev/wdevs/relspecgo/pkg/readers/bun"
    "git.warky.dev/wdevs/relspecgo/pkg/readers/dbml"
    "git.warky.dev/wdevs/relspecgo/pkg/readers/dctx"
    "git.warky.dev/wdevs/relspecgo/pkg/readers/drawdb"
+   "git.warky.dev/wdevs/relspecgo/pkg/readers/drizzle"
    "git.warky.dev/wdevs/relspecgo/pkg/readers/gorm"
+   "git.warky.dev/wdevs/relspecgo/pkg/readers/graphql"
    "git.warky.dev/wdevs/relspecgo/pkg/readers/json"
    "git.warky.dev/wdevs/relspecgo/pkg/readers/pgsql"
+   "git.warky.dev/wdevs/relspecgo/pkg/readers/prisma"
+   "git.warky.dev/wdevs/relspecgo/pkg/readers/typeorm"
    "git.warky.dev/wdevs/relspecgo/pkg/readers/yaml"
    "git.warky.dev/wdevs/relspecgo/pkg/writers"
    wbun "git.warky.dev/wdevs/relspecgo/pkg/writers/bun"
    wdbml "git.warky.dev/wdevs/relspecgo/pkg/writers/dbml"
    wdctx "git.warky.dev/wdevs/relspecgo/pkg/writers/dctx"
    wdrawdb "git.warky.dev/wdevs/relspecgo/pkg/writers/drawdb"
+   wdrizzle "git.warky.dev/wdevs/relspecgo/pkg/writers/drizzle"
    wgorm "git.warky.dev/wdevs/relspecgo/pkg/writers/gorm"
+   wgraphql "git.warky.dev/wdevs/relspecgo/pkg/writers/graphql"
    wjson "git.warky.dev/wdevs/relspecgo/pkg/writers/json"
    wpgsql "git.warky.dev/wdevs/relspecgo/pkg/writers/pgsql"
+   wprisma "git.warky.dev/wdevs/relspecgo/pkg/writers/prisma"
+   wtypeorm "git.warky.dev/wdevs/relspecgo/pkg/writers/typeorm"
    wyaml "git.warky.dev/wdevs/relspecgo/pkg/writers/yaml"
-   "github.com/spf13/cobra"
)

var (

@@ -51,20 +60,28 @@ Input formats:
- dbml: DBML schema files
- dctx: DCTX schema files
- drawdb: DrawDB JSON files
+- graphql: GraphQL schema files (.graphql, SDL)
- json: JSON database schema
- yaml: YAML database schema
- gorm: GORM model files (Go, file or directory)
- bun: Bun model files (Go, file or directory)
+- drizzle: Drizzle ORM schema files (TypeScript, file or directory)
+- prisma: Prisma schema files (.prisma)
+- typeorm: TypeORM entity files (TypeScript)
- pgsql: PostgreSQL database (live connection)

Output formats:
- dbml: DBML schema files
- dctx: DCTX schema files
- drawdb: DrawDB JSON files
+- graphql: GraphQL schema files (.graphql, SDL)
- json: JSON database schema
- yaml: YAML database schema
- gorm: GORM model files (Go)
- bun: Bun model files (Go)
+- drizzle: Drizzle ORM schema files (TypeScript)
+- prisma: Prisma schema files (.prisma)
+- typeorm: TypeORM entity files (TypeScript)
- pgsql: PostgreSQL SQL schema

PostgreSQL Connection String Examples:

@@ -123,18 +140,27 @@ Examples:
}

func init() {
-   convertCmd.Flags().StringVar(&convertSourceType, "from", "", "Source format (dbml, dctx, drawdb, json, yaml, gorm, bun, pgsql)")
+   convertCmd.Flags().StringVar(&convertSourceType, "from", "", "Source format (dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql)")
    convertCmd.Flags().StringVar(&convertSourcePath, "from-path", "", "Source file path (for file-based formats)")
    convertCmd.Flags().StringVar(&convertSourceConn, "from-conn", "", "Source connection string (for database formats)")

-   convertCmd.Flags().StringVar(&convertTargetType, "to", "", "Target format (dbml, dctx, drawdb, json, yaml, gorm, bun, pgsql)")
+   convertCmd.Flags().StringVar(&convertTargetType, "to", "", "Target format (dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql)")
    convertCmd.Flags().StringVar(&convertTargetPath, "to-path", "", "Target output path (file or directory)")
    convertCmd.Flags().StringVar(&convertPackageName, "package", "", "Package name (for code generation formats like gorm/bun)")
    convertCmd.Flags().StringVar(&convertSchemaFilter, "schema", "", "Filter to a specific schema by name (required for formats like dctx that only support single schemas)")

-   convertCmd.MarkFlagRequired("from")
-   convertCmd.MarkFlagRequired("to")
-   convertCmd.MarkFlagRequired("to-path")
+   err := convertCmd.MarkFlagRequired("from")
+   if err != nil {
+       fmt.Fprintf(os.Stderr, "Error marking from flag as required: %v\n", err)
+   }
+   err = convertCmd.MarkFlagRequired("to")
+   if err != nil {
+       fmt.Fprintf(os.Stderr, "Error marking to flag as required: %v\n", err)
+   }
+   err = convertCmd.MarkFlagRequired("to-path")
+   if err != nil {
+       fmt.Fprintf(os.Stderr, "Error marking to-path flag as required: %v\n", err)
+   }
}

func runConvert(cmd *cobra.Command, args []string) error {

@@ -239,6 +265,30 @@ func readDatabaseForConvert(dbType, filePath, connString string) (*models.Databa
        }
        reader = bun.NewReader(&readers.ReaderOptions{FilePath: filePath})

+   case "drizzle":
+       if filePath == "" {
+           return nil, fmt.Errorf("file path is required for Drizzle format")
+       }
+       reader = drizzle.NewReader(&readers.ReaderOptions{FilePath: filePath})
+
+   case "prisma":
+       if filePath == "" {
+           return nil, fmt.Errorf("file path is required for Prisma format")
+       }
+       reader = prisma.NewReader(&readers.ReaderOptions{FilePath: filePath})
+
+   case "typeorm":
+       if filePath == "" {
+           return nil, fmt.Errorf("file path is required for TypeORM format")
+       }
+       reader = typeorm.NewReader(&readers.ReaderOptions{FilePath: filePath})
+
+   case "graphql", "gql":
+       if filePath == "" {
+           return nil, fmt.Errorf("file path is required for GraphQL format")
+       }
+       reader = graphql.NewReader(&readers.ReaderOptions{FilePath: filePath})
+
    default:
        return nil, fmt.Errorf("unsupported source format: %s", dbType)
    }

@@ -287,9 +337,21 @@ func writeDatabase(db *models.Database, dbType, outputPath, packageName, schemaF
        }
        writer = wbun.NewWriter(writerOpts)

+   case "drizzle":
+       writer = wdrizzle.NewWriter(writerOpts)
+
    case "pgsql", "postgres", "postgresql", "sql":
        writer = wpgsql.NewWriter(writerOpts)

+   case "prisma":
+       writer = wprisma.NewWriter(writerOpts)
+
+   case "typeorm":
+       writer = wtypeorm.NewWriter(writerOpts)
+
+   case "graphql", "gql":
+       writer = wgraphql.NewWriter(writerOpts)
+
    default:
        return fmt.Errorf("unsupported target format: %s", dbType)
    }

@@ -318,7 +380,7 @@ func writeDatabase(db *models.Database, dbType, outputPath, packageName, schemaF
    }

    // For formats like DCTX that don't support full database writes, require schema filter
-   if strings.ToLower(dbType) == "dctx" {
+   if strings.EqualFold(dbType, "dctx") {
        if len(db.Schemas) == 0 {
            return fmt.Errorf("no schemas found in database")
        }
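The new cases make the formats above selectable from the CLI; `graphql` also accepts the `gql` alias, and DCTX keeps its single-schema restriction. A usage sketch with placeholder paths and schema name, using the `--schema` filter flag defined in `init()` above:

```bash
# GraphQL SDL can be read as either "graphql" or "gql"
./build/relspec convert --from gql --from-path ./schema.graphql \
  --to yaml --to-path ./schema.yaml

# DCTX only supports a single schema, so select one explicitly
./build/relspec convert --from pgsql --from-conn "$RELSPEC_TEST_PG_CONN" \
  --to dctx --to-path ./legacy.dctx --schema public
```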
@@ -6,6 +6,8 @@ import (
    "strings"
    "time"

+   "github.com/spf13/cobra"
+
    "git.warky.dev/wdevs/relspecgo/pkg/diff"
    "git.warky.dev/wdevs/relspecgo/pkg/models"
    "git.warky.dev/wdevs/relspecgo/pkg/readers"

@@ -15,7 +17,6 @@ import (
    "git.warky.dev/wdevs/relspecgo/pkg/readers/json"
    "git.warky.dev/wdevs/relspecgo/pkg/readers/pgsql"
    "git.warky.dev/wdevs/relspecgo/pkg/readers/yaml"
-   "github.com/spf13/cobra"
)

var (

@@ -96,8 +97,14 @@ func init() {
    diffCmd.Flags().StringVar(&outputFormat, "format", "summary", "Output format (summary, json, html)")
    diffCmd.Flags().StringVar(&outputPath, "output", "", "Output file path (default: stdout for summary, required for json/html)")

-   diffCmd.MarkFlagRequired("from")
-   diffCmd.MarkFlagRequired("to")
+   err := diffCmd.MarkFlagRequired("from")
+   if err != nil {
+       fmt.Fprintf(os.Stderr, "Error marking from flag as required: %v\n", err)
+   }
+   err = diffCmd.MarkFlagRequired("to")
+   if err != nil {
+       fmt.Fprintf(os.Stderr, "Error marking to flag as required: %v\n", err)
+   }
}

func runDiff(cmd *cobra.Command, args []string) error {
go.mod (9 lines changed)

@@ -1,12 +1,13 @@
module git.warky.dev/wdevs/relspecgo

-go 1.24
+go 1.24.0

require (
    github.com/google/uuid v1.6.0
    github.com/jackc/pgx/v5 v5.7.6
    github.com/spf13/cobra v1.10.2
    github.com/stretchr/testify v1.11.1
+   github.com/uptrace/bun v1.2.16
    gopkg.in/yaml.v3 v3.0.1
)

@@ -15,10 +16,16 @@ require (
    github.com/inconshreveable/mousetrap v1.1.0 // indirect
    github.com/jackc/pgpassfile v1.0.0 // indirect
    github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
+   github.com/jinzhu/inflection v1.0.0 // indirect
    github.com/kr/pretty v0.3.1 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
+   github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
    github.com/rogpeppe/go-internal v1.14.1 // indirect
    github.com/spf13/pflag v1.0.10 // indirect
+   github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
+   github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
+   github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
    golang.org/x/crypto v0.41.0 // indirect
+   golang.org/x/sys v0.38.0 // indirect
    golang.org/x/text v0.28.0 // indirect
)
go.sum (14 lines changed)

@@ -15,6 +15,8 @@ github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=

@@ -22,6 +24,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
+github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=

@@ -36,11 +40,21 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo=
+github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs=
+github.com/uptrace/bun v1.2.16 h1:QlObi6ZIK5Ao7kAALnh91HWYNZUBbVwye52fmlQM9kc=
+github.com/uptrace/bun v1.2.16/go.mod h1:jMoNg2n56ckaawi/O/J92BHaECmrz6IRjuMWqlMaMTM=
+github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
+github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
+github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
+github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
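The only new direct dependency is `github.com/uptrace/bun v1.2.16`; the remaining additions are marked `// indirect` and come in through it. The CI job keeps a "Check mod tidiness" step whose body is not visible in this extract; a common shape for such a check, offered here only as an assumption, is:

```bash
go mod tidy
# Fail if tidying modified go.mod or go.sum, i.e. they were not committed in a tidy state
git diff --exit-code go.mod go.sum
```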
@@ -1,71 +0,0 @@
-#!/bin/bash
-
-# Ask if the user wants to make a release version
-read -p "Do you want to make a release version? (y/n): " make_release
-
-if [[ $make_release =~ ^[Yy]$ ]]; then
-  # Get the latest tag from git
-  latest_tag=$(git describe --tags --abbrev=0 2>/dev/null)
-
-  if [ -z "$latest_tag" ]; then
-    # No tags exist yet, start with v1.0.0
-    suggested_version="v1.0.0"
-    echo "No existing tags found. Starting with $suggested_version"
-  else
-    echo "Latest tag: $latest_tag"
-
-    # Remove 'v' prefix if present
-    version_number="${latest_tag#v}"
-
-    # Split version into major.minor.patch
-    IFS='.' read -r major minor patch <<< "$version_number"
-
-    # Increment patch version
-    patch=$((patch + 1))
-
-    # Construct new version
-    suggested_version="v${major}.${minor}.${patch}"
-    echo "Suggested next version: $suggested_version"
-  fi
-
-  # Ask the user for the version number with the suggested version as default
-  read -p "Enter the version number (press Enter for $suggested_version): " version
-
-  # Use suggested version if user pressed Enter without input
-  if [ -z "$version" ]; then
-    version="$suggested_version"
-  fi
-
-  # Prepend 'v' to the version if it doesn't start with it
-  if ! [[ $version =~ ^v ]]; then
-    version="v$version"
-  fi
-
-  # Get commit logs since the last tag
-  if [ -z "$latest_tag" ]; then
-    # No previous tag, get all commits
-    commit_logs=$(git log --pretty=format:"- %s" --no-merges)
-  else
-    # Get commits since the last tag
-    commit_logs=$(git log "${latest_tag}..HEAD" --pretty=format:"- %s" --no-merges)
-  fi
-
-  # Create the tag message
-  if [ -z "$commit_logs" ]; then
-    tag_message="Release $version"
-  else
-    tag_message="Release $version
-
-${commit_logs}"
-  fi
-
-  # Create an annotated tag with the commit logs
-  git tag -a "$version" -m "$tag_message"
-
-  # Push the tag to the remote repository
-  git push origin "$version"
-
-  echo "Tag $version created and pushed to the remote repository."
-else
-  echo "No release version created."
-fi
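This interactive script appears to be superseded by the non-interactive `release` and `release-version` Makefile targets together with the tag-triggered Release workflow: any annotated tag matching `v*.*.*` pushed to the remote now starts the build. For example (the version is a placeholder):

```bash
git tag -a v1.2.3 -m "Release v1.2.3"
git push origin v1.2.3   # matches the workflow trigger 'v*.*.*'
```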
@@ -2,14 +2,15 @@ package diff

import (
    "reflect"
+
    "git.warky.dev/wdevs/relspecgo/pkg/models"
)

// CompareDatabases compares two database models and returns the differences
func CompareDatabases(source, target *models.Database) *DiffResult {
    result := &DiffResult{
        Source:  source.Name,
        Target:  target.Name,
        Schemas: compareSchemas(source.Schemas, target.Schemas),
    }
    return result
|||||||
@@ -4,8 +4,8 @@ import "git.warky.dev/wdevs/relspecgo/pkg/models"
|
|||||||
|
|
||||||
// DiffResult represents the complete difference analysis between two databases
|
// DiffResult represents the complete difference analysis between two databases
|
||||||
type DiffResult struct {
|
type DiffResult struct {
|
||||||
Source string `json:"source"`
|
Source string `json:"source"`
|
||||||
Target string `json:"target"`
|
Target string `json:"target"`
|
||||||
Schemas *SchemaDiff `json:"schemas"`
|
Schemas *SchemaDiff `json:"schemas"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -18,17 +18,17 @@ type SchemaDiff struct {
|
|||||||
|
|
||||||
// SchemaChange represents changes within a schema
|
// SchemaChange represents changes within a schema
|
||||||
type SchemaChange struct {
|
type SchemaChange struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Tables *TableDiff `json:"tables,omitempty"`
|
Tables *TableDiff `json:"tables,omitempty"`
|
||||||
Views *ViewDiff `json:"views,omitempty"`
|
Views *ViewDiff `json:"views,omitempty"`
|
||||||
Sequences *SequenceDiff `json:"sequences,omitempty"`
|
Sequences *SequenceDiff `json:"sequences,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// TableDiff represents differences in tables
|
// TableDiff represents differences in tables
|
||||||
type TableDiff struct {
|
type TableDiff struct {
|
||||||
Missing []*models.Table `json:"missing"` // Tables in source but not in target
|
Missing []*models.Table `json:"missing"` // Tables in source but not in target
|
||||||
Extra []*models.Table `json:"extra"` // Tables in target but not in source
|
Extra []*models.Table `json:"extra"` // Tables in target but not in source
|
||||||
Modified []*TableChange `json:"modified"` // Tables that exist in both but differ
|
Modified []*TableChange `json:"modified"` // Tables that exist in both but differ
|
||||||
}
|
}
|
||||||
|
|
||||||
// TableChange represents changes within a table
|
// TableChange represents changes within a table
|
||||||
@@ -50,16 +50,16 @@ type ColumnDiff struct {
|
|||||||
|
|
||||||
// ColumnChange represents a modified column
|
// ColumnChange represents a modified column
|
||||||
type ColumnChange struct {
|
type ColumnChange struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Source *models.Column `json:"source"`
|
Source *models.Column `json:"source"`
|
||||||
Target *models.Column `json:"target"`
|
Target *models.Column `json:"target"`
|
||||||
Changes map[string]any `json:"changes"` // Map of field name to what changed
|
Changes map[string]any `json:"changes"` // Map of field name to what changed
|
||||||
}
|
}
|
||||||
|
|
||||||
// IndexDiff represents differences in indexes
|
// IndexDiff represents differences in indexes
|
||||||
type IndexDiff struct {
|
type IndexDiff struct {
|
||||||
Missing []*models.Index `json:"missing"` // Indexes in source but not in target
|
Missing []*models.Index `json:"missing"` // Indexes in source but not in target
|
||||||
Extra []*models.Index `json:"extra"` // Indexes in target but not in source
|
Extra []*models.Index `json:"extra"` // Indexes in target but not in source
|
||||||
Modified []*IndexChange `json:"modified"` // Indexes that exist in both but differ
|
Modified []*IndexChange `json:"modified"` // Indexes that exist in both but differ
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -103,8 +103,8 @@ type RelationshipChange struct {
|
|||||||
|
|
||||||
// ViewDiff represents differences in views
|
// ViewDiff represents differences in views
|
||||||
type ViewDiff struct {
|
type ViewDiff struct {
|
||||||
Missing []*models.View `json:"missing"` // Views in source but not in target
|
Missing []*models.View `json:"missing"` // Views in source but not in target
|
||||||
Extra []*models.View `json:"extra"` // Views in target but not in source
|
Extra []*models.View `json:"extra"` // Views in target but not in source
|
||||||
Modified []*ViewChange `json:"modified"` // Views that exist in both but differ
|
Modified []*ViewChange `json:"modified"` // Views that exist in both but differ
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -133,14 +133,14 @@ type SequenceChange struct {
|
|||||||
|
|
||||||
// Summary provides counts for quick overview
|
// Summary provides counts for quick overview
|
||||||
type Summary struct {
|
type Summary struct {
|
||||||
Schemas SchemaSummary `json:"schemas"`
|
Schemas SchemaSummary `json:"schemas"`
|
||||||
Tables TableSummary `json:"tables"`
|
Tables TableSummary `json:"tables"`
|
||||||
Columns ColumnSummary `json:"columns"`
|
Columns ColumnSummary `json:"columns"`
|
||||||
Indexes IndexSummary `json:"indexes"`
|
Indexes IndexSummary `json:"indexes"`
|
||||||
Constraints ConstraintSummary `json:"constraints"`
|
Constraints ConstraintSummary `json:"constraints"`
|
||||||
Relationships RelationshipSummary `json:"relationships"`
|
Relationships RelationshipSummary `json:"relationships"`
|
||||||
Views ViewSummary `json:"views"`
|
Views ViewSummary `json:"views"`
|
||||||
Sequences SequenceSummary `json:"sequences"`
|
Sequences SequenceSummary `json:"sequences"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type SchemaSummary struct {
|
type SchemaSummary struct {
|
||||||
|
|||||||
@@ -2,7 +2,13 @@ package models
|
|||||||
|
|
||||||
import "encoding/xml"
|
import "encoding/xml"
|
||||||
|
|
||||||
-// DCTXDictionary represents the root element of a DCTX file
+// DCTX File Format Models
+//
+// This file defines the data structures for parsing and generating DCTX
+// (Data Dictionary) XML files, which are used by Clarion development tools
+// for database schema definitions.
+
+// DCTXDictionary represents the root element of a DCTX file.
 type DCTXDictionary struct {
 	XMLName xml.Name `xml:"Dictionary"`
 	Name    string   `xml:"Name,attr"`
@@ -11,7 +17,7 @@ type DCTXDictionary struct {
 	Relations []DCTXRelation `xml:"Relation,omitempty"`
 }
 
-// DCTXTable represents a table definition in DCTX
+// DCTXTable represents a table definition in DCTX format.
 type DCTXTable struct {
 	Guid string `xml:"Guid,attr"`
 	Name string `xml:"Name,attr"`
@@ -25,7 +31,8 @@ type DCTXTable struct {
 	Options []DCTXOption `xml:"Option,omitempty"`
 }
 
-// DCTXField represents a field/column definition in DCTX
+// DCTXField represents a field/column definition in DCTX format.
+// Fields can be nested for GROUP structures.
 type DCTXField struct {
 	Guid string `xml:"Guid,attr"`
 	Name string `xml:"Name,attr"`
@@ -37,7 +44,7 @@ type DCTXField struct {
 	Options []DCTXOption `xml:"Option,omitempty"`
 }
 
-// DCTXKey represents an index or key definition in DCTX
+// DCTXKey represents an index or key definition in DCTX format.
 type DCTXKey struct {
 	Guid string `xml:"Guid,attr"`
 	Name string `xml:"Name,attr"`
@@ -49,7 +56,7 @@ type DCTXKey struct {
 	Components []DCTXComponent `xml:"Component"`
 }
 
-// DCTXComponent represents a component of a key (field reference)
+// DCTXComponent represents a component of a key, referencing a field in the index.
 type DCTXComponent struct {
 	Guid    string `xml:"Guid,attr"`
 	FieldId string `xml:"FieldId,attr,omitempty"`
@@ -57,14 +64,14 @@ type DCTXComponent struct {
 	Ascend bool `xml:"Ascend,attr,omitempty"`
 }
 
-// DCTXOption represents a property option in DCTX
+// DCTXOption represents a property option in DCTX format for metadata storage.
 type DCTXOption struct {
 	Property      string `xml:"Property,attr"`
 	PropertyType  string `xml:"PropertyType,attr,omitempty"`
 	PropertyValue string `xml:"PropertyValue,attr"`
 }
 
-// DCTXRelation represents a relationship/foreign key in DCTX
+// DCTXRelation represents a relationship/foreign key in DCTX format.
 type DCTXRelation struct {
 	Guid         string `xml:"Guid,attr"`
 	PrimaryTable string `xml:"PrimaryTable,attr"`
@@ -77,7 +84,7 @@ type DCTXRelation struct {
 	PrimaryMappings []DCTXFieldMapping `xml:"PrimaryMapping,omitempty"`
 }
 
-// DCTXFieldMapping represents a field mapping in a relation
+// DCTXFieldMapping represents a field mapping in a relation for multi-column foreign keys.
 type DCTXFieldMapping struct {
 	Guid  string `xml:"Guid,attr"`
 	Field string `xml:"Field,attr"`
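The DCTX structs above are plain `encoding/xml` bindings, so loading a dictionary is a standard unmarshal call. A minimal sketch follows; the input file name is hypothetical and only fields visible in this diff are used.

```go
package main

import (
	"encoding/xml"
	"fmt"
	"os"
)

// DCTXDictionary mirrors the root element shown in the diff above.
type DCTXDictionary struct {
	XMLName   xml.Name       `xml:"Dictionary"`
	Name      string         `xml:"Name,attr"`
	Relations []DCTXRelation `xml:"Relation,omitempty"`
}

// DCTXRelation is trimmed to the attributes shown above.
type DCTXRelation struct {
	Guid         string `xml:"Guid,attr"`
	PrimaryTable string `xml:"PrimaryTable,attr"`
}

func main() {
	data, err := os.ReadFile("example.dctx") // hypothetical input file
	if err != nil {
		panic(err)
	}

	var dict DCTXDictionary
	if err := xml.Unmarshal(data, &dict); err != nil {
		panic(err)
	}

	fmt.Printf("dictionary %q with %d relations\n", dict.Name, len(dict.Relations))
}
```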
@@ -2,11 +2,14 @@ package models
 
 import "fmt"
 
-// =============================================================================
-// Flat/Denormalized Views - Flattened structures with fully qualified names
-// =============================================================================
+// Flat/Denormalized Views
+//
+// This file provides flattened data structures with fully qualified names
+// for easier querying and analysis of database schemas without navigating
+// nested hierarchies.
 
-// FlatColumn represents a column with full context in a single structure
+// FlatColumn represents a column with full database context in a single structure.
+// It includes fully qualified names for easy identification and querying.
 type FlatColumn struct {
 	DatabaseName string `json:"database_name" yaml:"database_name" xml:"database_name"`
 	SchemaName   string `json:"schema_name" yaml:"schema_name" xml:"schema_name"`
@@ -25,7 +28,7 @@ type FlatColumn struct {
 	Comment string `json:"comment,omitempty" yaml:"comment,omitempty" xml:"comment,omitempty"`
 }
 
-// ToFlatColumns converts a Database to a slice of FlatColumns
+// ToFlatColumns converts a Database to a slice of FlatColumns for denormalized access to all columns.
 func (d *Database) ToFlatColumns() []*FlatColumn {
 	flatColumns := make([]*FlatColumn, 0)
 
@@ -56,7 +59,7 @@ func (d *Database) ToFlatColumns() []*FlatColumn {
 	return flatColumns
 }
 
-// FlatTable represents a table with full context
+// FlatTable represents a table with full database context and aggregated counts.
 type FlatTable struct {
 	DatabaseName string `json:"database_name" yaml:"database_name" xml:"database_name"`
 	SchemaName   string `json:"schema_name" yaml:"schema_name" xml:"schema_name"`
@@ -70,7 +73,7 @@ type FlatTable struct {
 	IndexCount int `json:"index_count" yaml:"index_count" xml:"index_count"`
 }
 
-// ToFlatTables converts a Database to a slice of FlatTables
+// ToFlatTables converts a Database to a slice of FlatTables for denormalized access to all tables.
 func (d *Database) ToFlatTables() []*FlatTable {
 	flatTables := make([]*FlatTable, 0)
 
@@ -94,7 +97,7 @@ func (d *Database) ToFlatTables() []*FlatTable {
 	return flatTables
 }
 
-// FlatConstraint represents a constraint with full context
+// FlatConstraint represents a constraint with full database context and resolved references.
 type FlatConstraint struct {
 	DatabaseName string `json:"database_name" yaml:"database_name" xml:"database_name"`
 	SchemaName   string `json:"schema_name" yaml:"schema_name" xml:"schema_name"`
@@ -112,7 +115,7 @@ type FlatConstraint struct {
 	OnUpdate string `json:"on_update,omitempty" yaml:"on_update,omitempty" xml:"on_update,omitempty"`
 }
 
-// ToFlatConstraints converts a Database to a slice of FlatConstraints
+// ToFlatConstraints converts a Database to a slice of FlatConstraints for denormalized access to all constraints.
 func (d *Database) ToFlatConstraints() []*FlatConstraint {
 	flatConstraints := make([]*FlatConstraint, 0)
 
@@ -148,7 +151,7 @@ func (d *Database) ToFlatConstraints() []*FlatConstraint {
 	return flatConstraints
 }
 
-// FlatRelationship represents a relationship with full context
+// FlatRelationship represents a relationship with full database context and fully qualified table names.
 type FlatRelationship struct {
 	DatabaseName     string `json:"database_name" yaml:"database_name" xml:"database_name"`
 	RelationshipName string `json:"relationship_name" yaml:"relationship_name" xml:"relationship_name"`
@@ -164,7 +167,7 @@ type FlatRelationship struct {
 	Description string `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
 }
 
-// ToFlatRelationships converts a Database to a slice of FlatRelationships
+// ToFlatRelationships converts a Database to a slice of FlatRelationships for denormalized access to all relationships.
 func (d *Database) ToFlatRelationships() []*FlatRelationship {
 	flatRelationships := make([]*FlatRelationship, 0)
 
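The flat views are plain conversions, so consuming them is a single call per view. A minimal sketch, assuming a `*models.Database` produced by any reader (here an empty one from `InitDatabase`, which appears later in this changeset); the helper name is hypothetical and only fields visible in this diff are referenced.

```go
package main

import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
)

// summarizeFlatViews shows the call shape for the denormalized views.
func summarizeFlatViews(db *models.Database) {
	cols := db.ToFlatColumns()
	tables := db.ToFlatTables()
	fmt.Printf("%d flattened columns across %d flattened tables\n", len(cols), len(tables))

	for _, c := range cols {
		// DatabaseName and SchemaName are among the qualified-name fields shown above.
		fmt.Printf("  %s.%s ...\n", c.DatabaseName, c.SchemaName)
	}
}

func main() {
	db := models.InitDatabase("example") // empty database, so the counts are zero
	summarizeFlatViews(db)
}
```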
@@ -1,13 +1,19 @@
+// Package models provides the core data structures for representing database schemas.
+// It defines types for databases, schemas, tables, columns, relationships, constraints,
+// indexes, views, sequences, and other database objects. These models serve as the
+// intermediate representation for converting between various database schema formats.
 package models
 
 import "strings"
 
+// DatabaseType represents the type of database system.
 type DatabaseType string
 
+// Supported database types.
 const (
-	PostgresqlDatabaseType DatabaseType = "pgsql"
-	MSSQLDatabaseType      DatabaseType = "mssql"
-	SqlLiteDatabaseType    DatabaseType = "sqlite"
+	PostgresqlDatabaseType DatabaseType = "pgsql"  // PostgreSQL database
+	MSSQLDatabaseType      DatabaseType = "mssql"  // Microsoft SQL Server database
+	SqlLiteDatabaseType    DatabaseType = "sqlite" // SQLite database
 )
 
 // Database represents the complete database schema
@@ -21,11 +27,13 @@ type Database struct {
 	SourceFormat string `json:"source_format,omitempty" yaml:"source_format,omitempty" xml:"source_format,omitempty"` // Source Format of the database.
 }
 
-// SQLNamer returns the database name in lowercase
+// SQLName returns the database name in lowercase for SQL compatibility.
 func (d *Database) SQLName() string {
 	return strings.ToLower(d.Name)
 }
 
+// Schema represents a database schema, which is a logical grouping of database objects
+// such as tables, views, sequences, and relationships within a database.
 type Schema struct {
 	Name        string `json:"name" yaml:"name" xml:"name"`
 	Description string `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
@@ -40,13 +48,16 @@ type Schema struct {
 	Sequence    uint            `json:"sequence,omitempty" yaml:"sequence,omitempty" xml:"sequence,omitempty"`
 	RefDatabase *Database       `json:"-" yaml:"-" xml:"-"` // Excluded to prevent circular references
 	Relations   []*Relationship `json:"relations,omitempty" yaml:"relations,omitempty" xml:"-"`
+	Enums       []*Enum         `json:"enums,omitempty" yaml:"enums,omitempty" xml:"enums"`
 }
 
-// SQLName returns the schema name in lowercase
+// SQLName returns the schema name in lowercase for SQL compatibility.
 func (d *Schema) SQLName() string {
 	return strings.ToLower(d.Name)
 }
 
+// Table represents a database table with its columns, constraints, indexes,
+// and relationships. Tables are the primary data storage structures in a database.
 type Table struct {
 	Name        string `json:"name" yaml:"name" xml:"name"`
 	Description string `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
@@ -62,11 +73,12 @@ type Table struct {
 	RefSchema *Schema `json:"-" yaml:"-" xml:"-"` // Excluded to prevent circular references
 }
 
-// SQLName returns the table name in lowercase
+// SQLName returns the table name in lowercase for SQL compatibility.
 func (d *Table) SQLName() string {
 	return strings.ToLower(d.Name)
 }
 
+// GetPrimaryKey returns the primary key column for the table, or nil if none exists.
 func (m Table) GetPrimaryKey() *Column {
 	for _, column := range m.Columns {
 		if column.IsPrimaryKey {
@@ -76,6 +88,7 @@ func (m Table) GetPrimaryKey() *Column {
 	return nil
 }
 
+// GetForeignKeys returns all foreign key constraints for the table.
 func (m Table) GetForeignKeys() []*Constraint {
 	keys := make([]*Constraint, 0)
 
@@ -100,7 +113,7 @@ type View struct {
 	RefSchema *Schema `json:"-" yaml:"-" xml:"-"` // Excluded to prevent circular references
 }
 
-// SQLName returns the view name in lowercase
+// SQLName returns the view name in lowercase for SQL compatibility.
 func (d *View) SQLName() string {
 	return strings.ToLower(d.Name)
 }
@@ -123,7 +136,7 @@ type Sequence struct {
 	RefSchema *Schema `json:"-" yaml:"-" xml:"-"` // Excluded to prevent circular references
 }
 
-// SQLName returns the sequence name in lowercase
+// SQLName returns the sequence name in lowercase for SQL compatibility.
 func (d *Sequence) SQLName() string {
 	return strings.ToLower(d.Name)
 }
@@ -147,11 +160,13 @@ type Column struct {
 	Sequence uint `json:"sequence,omitempty" yaml:"sequence,omitempty" xml:"sequence,omitempty"`
 }
 
-// SQLName returns the table name in lowercase
+// SQLName returns the column name in lowercase for SQL compatibility.
 func (d *Column) SQLName() string {
 	return strings.ToLower(d.Name)
 }
 
+// Index represents a database index for optimizing query performance.
+// Indexes can be unique, partial, or include additional columns.
 type Index struct {
 	Name        string `json:"name" yaml:"name" xml:"name"`
 	Description string `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
@@ -167,19 +182,23 @@ type Index struct {
 	Sequence uint `json:"sequence,omitempty" yaml:"sequence,omitempty" xml:"sequence,omitempty"`
 }
 
-// SQLName returns the Indexin lowercase
+// SQLName returns the index name in lowercase for SQL compatibility.
 func (d *Index) SQLName() string {
 	return strings.ToLower(d.Name)
 }
 
+// RelationType represents the type of relationship between database tables.
 type RelationType string
 
+// Supported relationship types.
 const (
-	OneToOne   RelationType = "one_to_one"
-	OneToMany  RelationType = "one_to_many"
-	ManyToMany RelationType = "many_to_many"
+	OneToOne   RelationType = "one_to_one"   // One record in table A relates to one record in table B
+	OneToMany  RelationType = "one_to_many"  // One record in table A relates to many records in table B
+	ManyToMany RelationType = "many_to_many" // Many records in table A relate to many records in table B
 )
 
+// Relationship represents a relationship between two database tables.
+// Relationships can be one-to-one, one-to-many, or many-to-many.
 type Relationship struct {
 	Name string       `json:"name" yaml:"name" xml:"name"`
 	Type RelationType `json:"type" yaml:"type" xml:"type"`
@@ -197,11 +216,13 @@ type Relationship struct {
 	Sequence uint `json:"sequence,omitempty" yaml:"sequence,omitempty" xml:"sequence,omitempty"`
 }
 
-// SQLName returns the Relationship lowercase
+// SQLName returns the relationship name in lowercase for SQL compatibility.
 func (d *Relationship) SQLName() string {
 	return strings.ToLower(d.Name)
 }
 
+// Constraint represents a database constraint that enforces data integrity rules.
+// Constraints can be primary keys, foreign keys, unique constraints, check constraints, or not-null constraints.
 type Constraint struct {
 	Name string         `json:"name" yaml:"name" xml:"name"`
 	Type ConstraintType `json:"type" yaml:"type" xml:"type"`
@@ -219,20 +240,37 @@ type Constraint struct {
 	Sequence uint `json:"sequence,omitempty" yaml:"sequence,omitempty" xml:"sequence,omitempty"`
 }
 
+// SQLName returns the constraint name in lowercase for SQL compatibility.
 func (d *Constraint) SQLName() string {
 	return strings.ToLower(d.Name)
 }
 
+// ConstraintType represents the type of database constraint.
 type ConstraintType string
 
+// Enum represents a database enumeration type with a set of allowed values.
+type Enum struct {
+	Name   string   `json:"name" yaml:"name" xml:"name"`
+	Values []string `json:"values" yaml:"values" xml:"values"`
+	Schema string   `json:"schema,omitempty" yaml:"schema,omitempty" xml:"schema,omitempty"`
+}
+
+// SQLName returns the enum name in lowercase for SQL compatibility.
+func (d *Enum) SQLName() string {
+	return strings.ToLower(d.Name)
+}
+
+// Supported constraint types.
 const (
-	PrimaryKeyConstraint ConstraintType = "primary_key"
-	ForeignKeyConstraint ConstraintType = "foreign_key"
-	UniqueConstraint     ConstraintType = "unique"
-	CheckConstraint      ConstraintType = "check"
-	NotNullConstraint    ConstraintType = "not_null"
+	PrimaryKeyConstraint ConstraintType = "primary_key" // Primary key uniquely identifies each record
+	ForeignKeyConstraint ConstraintType = "foreign_key" // Foreign key references another table
+	UniqueConstraint     ConstraintType = "unique"      // Unique constraint ensures all values are different
+	CheckConstraint      ConstraintType = "check"       // Check constraint validates data against an expression
+	NotNullConstraint    ConstraintType = "not_null"    // Not null constraint requires a value
 )
 
+// Script represents a database migration or initialization script.
+// Scripts can have dependencies and rollback capabilities.
 type Script struct {
 	Name        string `json:"name" yaml:"name" xml:"name"`
 	Description string `json:"description" yaml:"description" xml:"description"`
@@ -245,11 +283,12 @@ type Script struct {
 	Sequence uint `json:"sequence,omitempty" yaml:"sequence,omitempty" xml:"sequence,omitempty"`
 }
 
+// SQLName returns the script name in lowercase for SQL compatibility.
 func (d *Script) SQLName() string {
 	return strings.ToLower(d.Name)
 }
 
-// Initialize functions
+// Initialization functions for creating new model instances with proper defaults.
 
 // InitDatabase initializes a new Database with empty slices
 func InitDatabase(name string) *Database {
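The new `Enum` type slots into a `Schema` like any other object list. A minimal sketch of how the models compose and how `SQLName` behaves, using only fields visible in the diff above (the enum names and values are illustrative):

```go
package main

import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
)

func main() {
	// An enum attached to the public schema.
	status := &models.Enum{
		Name:   "Order_Status",
		Values: []string{"pending", "shipped", "delivered"},
		Schema: "public",
	}

	schema := &models.Schema{
		Name:  "Public",
		Enums: []*models.Enum{status},
	}

	// SQLName lowercases names for SQL compatibility, as documented above.
	fmt.Println(schema.SQLName(), status.SQLName()) // "public order_status"
}
```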
@@ -1,10 +1,12 @@
 package models
 
-// =============================================================================
-// Summary/Compact Views - Lightweight views with essential fields
-// =============================================================================
+// Summary/Compact Views
+//
+// This file provides lightweight summary structures with essential fields
+// and aggregated counts for quick database schema overviews without loading
+// full object graphs.
 
-// DatabaseSummary provides a compact overview of a database
+// DatabaseSummary provides a compact overview of a database with aggregated statistics.
 type DatabaseSummary struct {
 	Name        string `json:"name" yaml:"name" xml:"name"`
 	Description string `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
@@ -15,7 +17,7 @@ type DatabaseSummary struct {
 	TotalColumns int `json:"total_columns" yaml:"total_columns" xml:"total_columns"`
 }
 
-// ToSummary converts a Database to a DatabaseSummary
+// ToSummary converts a Database to a DatabaseSummary with calculated counts.
 func (d *Database) ToSummary() *DatabaseSummary {
 	summary := &DatabaseSummary{
 		Name: d.Name,
@@ -36,7 +38,7 @@ func (d *Database) ToSummary() *DatabaseSummary {
 	return summary
 }
 
-// SchemaSummary provides a compact overview of a schema
+// SchemaSummary provides a compact overview of a schema with aggregated statistics.
 type SchemaSummary struct {
 	Name        string `json:"name" yaml:"name" xml:"name"`
 	Description string `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
@@ -47,7 +49,7 @@ type SchemaSummary struct {
 	TotalConstraints int `json:"total_constraints" yaml:"total_constraints" xml:"total_constraints"`
 }
 
-// ToSummary converts a Schema to a SchemaSummary
+// ToSummary converts a Schema to a SchemaSummary with calculated counts.
 func (s *Schema) ToSummary() *SchemaSummary {
 	summary := &SchemaSummary{
 		Name: s.Name,
@@ -66,7 +68,7 @@ func (s *Schema) ToSummary() *SchemaSummary {
 	return summary
 }
 
-// TableSummary provides a compact overview of a table
+// TableSummary provides a compact overview of a table with aggregated statistics.
 type TableSummary struct {
 	Name   string `json:"name" yaml:"name" xml:"name"`
 	Schema string `json:"schema" yaml:"schema" xml:"schema"`
@@ -79,7 +81,7 @@ type TableSummary struct {
 	ForeignKeyCount int `json:"foreign_key_count" yaml:"foreign_key_count" xml:"foreign_key_count"`
 }
 
-// ToSummary converts a Table to a TableSummary
+// ToSummary converts a Table to a TableSummary with calculated counts.
 func (t *Table) ToSummary() *TableSummary {
 	summary := &TableSummary{
 		Name: t.Name,
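The summary views follow the same conversion pattern as the flat views. A minimal sketch of the call shape, assuming a `*models.Database` from any reader (an empty one here, so the counts are zero); only fields visible in the diff above are referenced.

```go
package main

import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
)

func main() {
	db := models.InitDatabase("demo")

	// ToSummary aggregates counts without loading the full object graph.
	summary := db.ToSummary()
	fmt.Printf("%s: %d columns in total\n", summary.Name, summary.TotalColumns)
}
```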
106
pkg/readers/bun/README.md
Normal file
@@ -0,0 +1,106 @@
# Bun Reader

Reads Go source files containing Bun model definitions and extracts database schema information.

## Overview

The Bun Reader parses Go source code files that define Bun models (structs with `bun` struct tags) and converts them into RelSpec's internal database model representation.

## Features

- Parses Bun struct tags to extract column definitions
- Extracts table names from `bun:"table:tablename"` tags
- Identifies primary keys, foreign keys, and indexes
- Supports relationship detection
- Handles both single files and directories

## Usage

### Basic Example

```go
package main

import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/readers"
	"git.warky.dev/wdevs/relspecgo/pkg/readers/bun"
)

func main() {
	options := &readers.ReaderOptions{
		FilePath: "/path/to/models.go",
	}

	reader := bun.NewReader(options)
	db, err := reader.ReadDatabase()
	if err != nil {
		panic(err)
	}

	fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```

### CLI Example

```bash
# Read Bun models and convert to JSON
relspec --input bun --in-file models/ --output json --out-file schema.json

# Convert Bun models to GORM
relspec --input bun --in-file models.go --output gorm --out-file gorm_models.go
```

## Supported Bun Tags

The reader recognizes the following Bun struct tags:

- `table` - Table name
- `column` - Column name
- `type` - SQL data type
- `pk` - Primary key
- `notnull` - NOT NULL constraint
- `autoincrement` - Auto-increment column
- `default` - Default value
- `unique` - Unique constraint
- `rel` - Relationship definition

## Example Bun Model

```go
package models

import (
	"time"

	"github.com/uptrace/bun"
)

type User struct {
	bun.BaseModel `bun:"table:users,alias:u"`

	ID        int64     `bun:"id,pk,autoincrement"`
	Username  string    `bun:"username,notnull,unique"`
	Email     string    `bun:"email,notnull"`
	CreatedAt time.Time `bun:"created_at,notnull,default:now()"`

	Posts []*Post `bun:"rel:has-many,join:id=user_id"`
}

type Post struct {
	bun.BaseModel `bun:"table:posts,alias:p"`

	ID      int64  `bun:"id,pk"`
	UserID  int64  `bun:"user_id,notnull"`
	Title   string `bun:"title,notnull"`
	Content string `bun:"content"`

	User *User `bun:"rel:belongs-to,join:user_id=id"`
}
```

## Notes

- Test files (ending in `_test.go`) are automatically excluded
- The `bun.BaseModel` embedded struct is automatically recognized
- Schema defaults to `public` if not specified
@@ -382,6 +382,23 @@ func (r *Reader) isRelationship(tag string) bool {
 	return strings.Contains(tag, "bun:\"rel:") || strings.Contains(tag, ",rel:")
 }
 
+// getRelationType extracts the relationship type from a bun tag
+func (r *Reader) getRelationType(bunTag string) string {
+	if strings.Contains(bunTag, "rel:has-many") {
+		return "has-many"
+	}
+	if strings.Contains(bunTag, "rel:belongs-to") {
+		return "belongs-to"
+	}
+	if strings.Contains(bunTag, "rel:has-one") {
+		return "has-one"
+	}
+	if strings.Contains(bunTag, "rel:many-to-many") {
+		return "many-to-many"
+	}
+	return ""
+}
+
 // parseRelationshipConstraints parses relationship fields to extract foreign key constraints
 func (r *Reader) parseRelationshipConstraints(table *models.Table, structType *ast.StructType, structMap map[string]*models.Table) {
 	for _, field := range structType.Fields.List {
@@ -409,27 +426,51 @@ func (r *Reader) parseRelationshipConstraints(table *models.Table, structType *a
 		}
 
 		// Parse the join information: join:user_id=id
-		// This means: referencedTable.user_id = thisTable.id
+		// This means: thisTable.user_id = referencedTable.id
 		joinInfo := r.parseJoinInfo(bunTag)
 		if joinInfo == nil {
 			continue
 		}
 
-		// The FK is on the referenced table
+		// Determine which table gets the FK based on relationship type
+		relType := r.getRelationType(bunTag)
+
+		var fkTable *models.Table
+		var fkColumn, refTable, refColumn string
+
+		switch strings.ToLower(relType) {
+		case "belongs-to":
+			// For belongs-to: FK is on the current table
+			// join:user_id=id means table.user_id references referencedTable.id
+			fkTable = table
+			fkColumn = joinInfo.ForeignKey
+			refTable = referencedTable.Name
+			refColumn = joinInfo.ReferencedKey
+		case "has-many":
+			// For has-many: FK is on the referenced table
+			// join:id=user_id means referencedTable.user_id references table.id
+			fkTable = referencedTable
+			fkColumn = joinInfo.ReferencedKey
+			refTable = table.Name
+			refColumn = joinInfo.ForeignKey
+		default:
+			continue
+		}
+
 		constraint := &models.Constraint{
-			Name:              fmt.Sprintf("fk_%s_%s", referencedTable.Name, table.Name),
+			Name:              fmt.Sprintf("fk_%s_%s", fkTable.Name, refTable),
 			Type:              models.ForeignKeyConstraint,
-			Table:             referencedTable.Name,
-			Schema:            referencedTable.Schema,
-			Columns:           []string{joinInfo.ForeignKey},
-			ReferencedTable:   table.Name,
-			ReferencedSchema:  table.Schema,
-			ReferencedColumns: []string{joinInfo.ReferencedKey},
+			Table:             fkTable.Name,
+			Schema:            fkTable.Schema,
+			Columns:           []string{fkColumn},
+			ReferencedTable:   refTable,
+			ReferencedSchema:  fkTable.Schema,
+			ReferencedColumns: []string{refColumn},
 			OnDelete:          "NO ACTION", // Bun doesn't specify this in tags
 			OnUpdate:          "NO ACTION",
 		}
 
-		referencedTable.Constraints[constraint.Name] = constraint
+		fkTable.Constraints[constraint.Name] = constraint
 	}
 }
 
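For orientation, a short sketch of what the corrected FK placement means for the two tag forms; the field and table names come from the Bun Reader README example above, not from this hunk, and the comments state the constraint this changeset would derive.

```go
package models

type User struct {
	ID int64 `bun:"id,pk,autoincrement"`

	// has-many with join:id=user_id: the FK lands on the referenced table,
	// i.e. fk_posts_users: posts(user_id) -> users(id).
	Posts []*Post `bun:"rel:has-many,join:id=user_id"`
}

type Post struct {
	ID     int64 `bun:"id,pk"`
	UserID int64 `bun:"user_id,notnull"`

	// belongs-to with join:user_id=id: the FK lands on the current table,
	// again fk_posts_users: posts(user_id) -> users(id).
	User *User `bun:"rel:belongs-to,join:user_id=id"`
}
```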
@@ -626,17 +667,14 @@ func (r *Reader) parseColumn(fieldName string, fieldType ast.Expr, tag string, s
 	// - nullzero tag means the field is nullable (can be NULL in DB)
 	// - absence of nullzero means the field is NOT NULL
 	// - primitive types (int64, bool, string) are NOT NULL by default
+	column.NotNull = true
+	// Primary keys are always NOT NULL
 
 	if strings.Contains(bunTag, "nullzero") {
 		column.NotNull = false
-	} else if r.isNullableGoType(fieldType) {
-		// SqlString, SqlInt, etc. without nullzero tag means NOT NULL
-		column.NotNull = true
 	} else {
-		// Primitive types are NOT NULL by default
-		column.NotNull = true
+		column.NotNull = !r.isNullableGoType(fieldType)
 	}
 
-	// Primary keys are always NOT NULL
 	if column.IsPrimaryKey {
 		column.NotNull = true
 	}
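A compact illustration of how the simplified rule reads for a few representative fields; the struct is hypothetical, but each case mirrors the nullability scenarios exercised by the tests below.

```go
package models

import "time"

// Example summarizes the NOT NULL outcomes under the revised parseColumn logic.
type Example struct {
	ID          int64     `bun:"id,pk"`                 // primary key  -> NOT NULL
	Title       string    `bun:"title,notnull"`         // explicit tag -> NOT NULL
	ViewCount   int64     `bun:"view_count"`            // primitive    -> NOT NULL
	Excerpt     *string   `bun:"excerpt"`               // pointer type -> nullable
	PublishedAt time.Time `bun:"published_at,nullzero"` // nullzero tag -> nullable
}
```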
522
pkg/readers/bun/reader_test.go
Normal file
@@ -0,0 +1,522 @@
package bun

import (
	"path/filepath"
	"testing"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
	"git.warky.dev/wdevs/relspecgo/pkg/readers"
)

func TestReader_ReadDatabase_Simple(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "bun", "simple.go"),
	}

	reader := NewReader(opts)
	db, err := reader.ReadDatabase()
	if err != nil {
		t.Fatalf("ReadDatabase() error = %v", err)
	}

	if db == nil {
		t.Fatal("ReadDatabase() returned nil database")
	}

	if len(db.Schemas) == 0 {
		t.Fatal("Expected at least one schema")
	}

	schema := db.Schemas[0]
	if schema.Name != "public" {
		t.Errorf("Expected schema name 'public', got '%s'", schema.Name)
	}

	if len(schema.Tables) != 1 {
		t.Fatalf("Expected 1 table, got %d", len(schema.Tables))
	}

	table := schema.Tables[0]
	if table.Name != "users" {
		t.Errorf("Expected table name 'users', got '%s'", table.Name)
	}

	if len(table.Columns) != 6 {
		t.Errorf("Expected 6 columns, got %d", len(table.Columns))
	}

	// Verify id column - primary key should be NOT NULL
	idCol, exists := table.Columns["id"]
	if !exists {
		t.Fatal("Column 'id' not found")
	}
	if !idCol.IsPrimaryKey {
		t.Error("Column 'id' should be primary key")
	}
	if !idCol.AutoIncrement {
		t.Error("Column 'id' should be auto-increment")
	}
	if !idCol.NotNull {
		t.Error("Column 'id' should be NOT NULL (primary keys are always NOT NULL)")
	}
	if idCol.Type != "bigint" {
		t.Errorf("Expected id type 'bigint', got '%s'", idCol.Type)
	}

	// Verify email column - explicit notnull tag should be NOT NULL
	emailCol, exists := table.Columns["email"]
	if !exists {
		t.Fatal("Column 'email' not found")
	}
	if !emailCol.NotNull {
		t.Error("Column 'email' should be NOT NULL (explicit 'notnull' tag)")
	}
	if emailCol.Type != "varchar" || emailCol.Length != 255 {
		t.Errorf("Expected email type 'varchar(255)', got '%s' with length %d", emailCol.Type, emailCol.Length)
	}

	// Verify name column - primitive string type should be NOT NULL by default in Bun
	nameCol, exists := table.Columns["name"]
	if !exists {
		t.Fatal("Column 'name' not found")
	}
	if !nameCol.NotNull {
		t.Error("Column 'name' should be NOT NULL (primitive string type, no nullzero tag)")
	}
	if nameCol.Type != "text" {
		t.Errorf("Expected name type 'text', got '%s'", nameCol.Type)
	}

	// Verify age column - pointer type should be nullable (NOT NULL = false)
	ageCol, exists := table.Columns["age"]
	if !exists {
		t.Fatal("Column 'age' not found")
	}
	if ageCol.NotNull {
		t.Error("Column 'age' should be nullable (pointer type *int)")
	}
	if ageCol.Type != "integer" {
		t.Errorf("Expected age type 'integer', got '%s'", ageCol.Type)
	}

	// Verify is_active column - primitive bool type should be NOT NULL by default
	isActiveCol, exists := table.Columns["is_active"]
	if !exists {
		t.Fatal("Column 'is_active' not found")
	}
	if !isActiveCol.NotNull {
		t.Error("Column 'is_active' should be NOT NULL (primitive bool type, no nullzero tag)")
	}
	if isActiveCol.Type != "boolean" {
		t.Errorf("Expected is_active type 'boolean', got '%s'", isActiveCol.Type)
	}

	// Verify created_at column - time.Time should be NOT NULL by default
	createdAtCol, exists := table.Columns["created_at"]
	if !exists {
		t.Fatal("Column 'created_at' not found")
	}
	if !createdAtCol.NotNull {
		t.Error("Column 'created_at' should be NOT NULL (time.Time is NOT NULL by default)")
	}
	if createdAtCol.Type != "timestamp" {
		t.Errorf("Expected created_at type 'timestamp', got '%s'", createdAtCol.Type)
	}

	// Verify unique index on email
	if len(table.Indexes) < 1 {
		t.Error("Expected at least 1 index on users table")
	}
}

func TestReader_ReadDatabase_Complex(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "bun", "complex.go"),
	}

	reader := NewReader(opts)
	db, err := reader.ReadDatabase()
	if err != nil {
		t.Fatalf("ReadDatabase() error = %v", err)
	}

	if db == nil {
		t.Fatal("ReadDatabase() returned nil database")
	}

	// Verify schema
	if len(db.Schemas) != 1 {
		t.Fatalf("Expected 1 schema, got %d", len(db.Schemas))
	}

	schema := db.Schemas[0]
	if schema.Name != "public" {
		t.Errorf("Expected schema name 'public', got '%s'", schema.Name)
	}

	// Verify tables
	if len(schema.Tables) != 3 {
		t.Fatalf("Expected 3 tables, got %d", len(schema.Tables))
	}

	// Find tables
	var usersTable, postsTable, commentsTable *models.Table
	for _, table := range schema.Tables {
		switch table.Name {
		case "users":
			usersTable = table
		case "posts":
			postsTable = table
		case "comments":
			commentsTable = table
		}
	}

	if usersTable == nil {
		t.Fatal("Users table not found")
	}
	if postsTable == nil {
		t.Fatal("Posts table not found")
	}
	if commentsTable == nil {
		t.Fatal("Comments table not found")
	}

	// Verify users table - test NOT NULL logic for various field types
	if len(usersTable.Columns) != 10 {
		t.Errorf("Expected 10 columns in users table, got %d", len(usersTable.Columns))
	}

	// username - NOT NULL (explicit notnull tag)
	usernameCol, exists := usersTable.Columns["username"]
	if !exists {
		t.Fatal("Column 'username' not found")
	}
	if !usernameCol.NotNull {
		t.Error("Column 'username' should be NOT NULL (explicit 'notnull' tag)")
	}

	// first_name - nullable (pointer type)
	firstNameCol, exists := usersTable.Columns["first_name"]
	if !exists {
		t.Fatal("Column 'first_name' not found")
	}
	if firstNameCol.NotNull {
		t.Error("Column 'first_name' should be nullable (pointer type *string)")
	}

	// last_name - nullable (pointer type)
	lastNameCol, exists := usersTable.Columns["last_name"]
	if !exists {
		t.Fatal("Column 'last_name' not found")
	}
	if lastNameCol.NotNull {
		t.Error("Column 'last_name' should be nullable (pointer type *string)")
	}

	// bio - nullable (pointer type)
	bioCol, exists := usersTable.Columns["bio"]
	if !exists {
		t.Fatal("Column 'bio' not found")
	}
	if bioCol.NotNull {
		t.Error("Column 'bio' should be nullable (pointer type *string)")
	}

	// is_active - NOT NULL (primitive bool without nullzero)
	isActiveCol, exists := usersTable.Columns["is_active"]
	if !exists {
		t.Fatal("Column 'is_active' not found")
	}
	if !isActiveCol.NotNull {
		t.Error("Column 'is_active' should be NOT NULL (primitive bool type without nullzero)")
	}

	// Verify users table indexes
	if len(usersTable.Indexes) < 1 {
		t.Error("Expected at least 1 index on users table")
	}

	// Verify posts table
	if len(postsTable.Columns) != 11 {
		t.Errorf("Expected 11 columns in posts table, got %d", len(postsTable.Columns))
	}

	// excerpt - nullable (pointer type)
	excerptCol, exists := postsTable.Columns["excerpt"]
	if !exists {
		t.Fatal("Column 'excerpt' not found")
	}
	if excerptCol.NotNull {
		t.Error("Column 'excerpt' should be nullable (pointer type *string)")
	}

	// published - NOT NULL (primitive bool without nullzero)
	publishedCol, exists := postsTable.Columns["published"]
	if !exists {
		t.Fatal("Column 'published' not found")
	}
	if !publishedCol.NotNull {
		t.Error("Column 'published' should be NOT NULL (primitive bool type without nullzero)")
	}

	// published_at - nullable (has nullzero tag)
	publishedAtCol, exists := postsTable.Columns["published_at"]
	if !exists {
		t.Fatal("Column 'published_at' not found")
	}
	if publishedAtCol.NotNull {
		t.Error("Column 'published_at' should be nullable (has nullzero tag)")
	}

	// view_count - NOT NULL (primitive int64 without nullzero)
	viewCountCol, exists := postsTable.Columns["view_count"]
	if !exists {
		t.Fatal("Column 'view_count' not found")
	}
	if !viewCountCol.NotNull {
		t.Error("Column 'view_count' should be NOT NULL (primitive int64 type without nullzero)")
	}

	// Verify posts table indexes
	if len(postsTable.Indexes) < 1 {
		t.Error("Expected at least 1 index on posts table")
	}

	// Verify comments table
	if len(commentsTable.Columns) != 6 {
		t.Errorf("Expected 6 columns in comments table, got %d", len(commentsTable.Columns))
	}

	// user_id - nullable (pointer type)
	userIDCol, exists := commentsTable.Columns["user_id"]
	if !exists {
		t.Fatal("Column 'user_id' not found in comments table")
	}
	if userIDCol.NotNull {
		t.Error("Column 'user_id' should be nullable (pointer type *int64)")
	}

	// post_id - NOT NULL (explicit notnull tag)
	postIDCol, exists := commentsTable.Columns["post_id"]
	if !exists {
		t.Fatal("Column 'post_id' not found in comments table")
	}
	if !postIDCol.NotNull {
		t.Error("Column 'post_id' should be NOT NULL (explicit 'notnull' tag)")
	}

	// Verify foreign key constraints are created from relationship tags
	// In Bun, relationships are defined with rel: tags
	// The constraints should be created on the referenced tables
	if len(postsTable.Constraints) > 0 {
		// Check if FK constraint exists
		var fkPostsUser *models.Constraint
		for _, c := range postsTable.Constraints {
			if c.Type == models.ForeignKeyConstraint && c.ReferencedTable == "users" {
				fkPostsUser = c
				break
			}
		}

		if fkPostsUser != nil {
			if len(fkPostsUser.Columns) != 1 || fkPostsUser.Columns[0] != "user_id" {
				t.Error("Expected FK column 'user_id'")
			}
			if len(fkPostsUser.ReferencedColumns) != 1 || fkPostsUser.ReferencedColumns[0] != "id" {
				t.Error("Expected FK referenced column 'id'")
			}
		}
	}

	if len(commentsTable.Constraints) > 0 {
		// Check if FK constraints exist
		var fkCommentsPost, fkCommentsUser *models.Constraint
		for _, c := range commentsTable.Constraints {
			if c.Type == models.ForeignKeyConstraint {
				if c.ReferencedTable == "posts" {
					fkCommentsPost = c
				} else if c.ReferencedTable == "users" {
					fkCommentsUser = c
				}
			}
		}

		if fkCommentsPost != nil {
			if len(fkCommentsPost.Columns) != 1 || fkCommentsPost.Columns[0] != "post_id" {
				t.Error("Expected FK column 'post_id'")
			}
		}

		if fkCommentsUser != nil {
			if len(fkCommentsUser.Columns) != 1 || fkCommentsUser.Columns[0] != "user_id" {
				t.Error("Expected FK column 'user_id'")
			}
		}
	}
}

func TestReader_ReadSchema(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "bun", "simple.go"),
	}

	reader := NewReader(opts)
	schema, err := reader.ReadSchema()
	if err != nil {
		t.Fatalf("ReadSchema() error = %v", err)
	}

	if schema == nil {
		t.Fatal("ReadSchema() returned nil schema")
	}

	if schema.Name != "public" {
		t.Errorf("Expected schema name 'public', got '%s'", schema.Name)
	}

	if len(schema.Tables) != 1 {
		t.Errorf("Expected 1 table, got %d", len(schema.Tables))
	}
}

func TestReader_ReadTable(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "bun", "simple.go"),
	}

	reader := NewReader(opts)
	table, err := reader.ReadTable()
	if err != nil {
		t.Fatalf("ReadTable() error = %v", err)
	}

	if table == nil {
		t.Fatal("ReadTable() returned nil table")
	}

	if table.Name != "users" {
		t.Errorf("Expected table name 'users', got '%s'", table.Name)
	}

	if len(table.Columns) != 6 {
		t.Errorf("Expected 6 columns, got %d", len(table.Columns))
	}
}

func TestReader_ReadDatabase_Directory(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "bun"),
	}

	reader := NewReader(opts)
	db, err := reader.ReadDatabase()
	if err != nil {
		t.Fatalf("ReadDatabase() error = %v", err)
	}

	if db == nil {
		t.Fatal("ReadDatabase() returned nil database")
	}

	// Should read both simple.go and complex.go
	if len(db.Schemas) == 0 {
		t.Fatal("Expected at least one schema")
	}

	schema := db.Schemas[0]
	// Should have at least 3 tables from complex.go (users, posts, comments)
	// plus 1 from simple.go (users) - but same table name, so may be overwritten
	if len(schema.Tables) < 3 {
		t.Errorf("Expected at least 3 tables, got %d", len(schema.Tables))
	}
}

func TestReader_ReadDatabase_InvalidPath(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: "/nonexistent/file.go",
	}

	reader := NewReader(opts)
	_, err := reader.ReadDatabase()
	if err == nil {
		t.Error("Expected error for invalid file path")
	}
}

func TestReader_ReadDatabase_EmptyPath(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: "",
	}

	reader := NewReader(opts)
	_, err := reader.ReadDatabase()
	if err == nil {
		t.Error("Expected error for empty file path")
	}
}

func TestReader_NullableTypes(t *testing.T) {
	// This test specifically verifies the NOT NULL logic changes
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "bun", "complex.go"),
	}

	reader := NewReader(opts)
	db, err := reader.ReadDatabase()
	if err != nil {
		t.Fatalf("ReadDatabase() error = %v", err)
	}

	// Find posts table
	var postsTable *models.Table
	for _, schema := range db.Schemas {
		for _, table := range schema.Tables {
			if table.Name == "posts" {
				postsTable = table
				break
			}
		}
	}

	if postsTable == nil {
		t.Fatal("Posts table not found")
	}

	// Test all nullability scenarios
	tests := []struct {
		column  string
		notNull bool
		reason  string
	}{
		{"id", true, "primary key"},
		{"user_id", true, "explicit notnull tag"},
		{"title", true, "explicit notnull tag"},
		{"slug", true, "explicit notnull tag"},
		{"content", true, "explicit notnull tag"},
		{"excerpt", false, "pointer type *string"},
		{"published", true, "primitive bool without nullzero"},
		{"view_count", true, "primitive int64 without nullzero"},
		{"published_at", false, "has nullzero tag"},
		{"created_at", true, "time.Time without nullzero"},
		{"updated_at", true, "time.Time without nullzero"},
	}

	for _, tt := range tests {
		col, exists := postsTable.Columns[tt.column]
		if !exists {
			t.Errorf("Column '%s' not found", tt.column)
			continue
		}

		if col.NotNull != tt.notNull {
			if tt.notNull {
				t.Errorf("Column '%s' should be NOT NULL (%s), but NotNull=%v",
					tt.column, tt.reason, col.NotNull)
			} else {
				t.Errorf("Column '%s' should be nullable (%s), but NotNull=%v",
					tt.column, tt.reason, col.NotNull)
			}
		}
	}
}
101
pkg/readers/dbml/README.md
Normal file
@@ -0,0 +1,101 @@
# DBML Reader

Reads Database Markup Language (DBML) files and extracts database schema information.

## Overview

The DBML Reader parses `.dbml` files that define database schemas using the DBML syntax (used by dbdiagram.io) and converts them into RelSpec's internal database model representation.

## Features

- Parses DBML syntax
- Extracts tables, columns, and relationships
- Supports DBML-specific features:
  - Table groups and notes
  - Enum definitions
  - Indexes
  - Foreign key relationships

## Usage

### Basic Example

```go
package main

import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/readers"
	"git.warky.dev/wdevs/relspecgo/pkg/readers/dbml"
)

func main() {
	options := &readers.ReaderOptions{
		FilePath: "/path/to/schema.dbml",
	}

	reader := dbml.NewReader(options)
	db, err := reader.ReadDatabase()
	if err != nil {
		panic(err)
	}

	fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```

### CLI Example

```bash
# Read DBML file and convert to JSON
relspec --input dbml --in-file schema.dbml --output json --out-file schema.json

# Convert DBML to GORM models
relspec --input dbml --in-file database.dbml --output gorm --out-file models.go
```

## Example DBML File

```dbml
Table users {
  id bigserial [pk, increment]
  username varchar(50) [not null, unique]
  email varchar(100) [not null]
  created_at timestamp [not null, default: `now()`]

  Note: 'Users table'
}

Table posts {
  id bigserial [pk]
  user_id bigint [not null, ref: > users.id]
  title varchar(200) [not null]
  content text

  indexes {
    user_id
    (user_id, created_at) [name: 'idx_user_posts']
  }
}

Ref: posts.user_id > users.id [delete: cascade]
```

## DBML Features Supported

- Table definitions with columns
- Primary keys (`pk`)
- Not null constraints (`not null`)
- Unique constraints (`unique`)
- Default values (`default`)
- Inline references (`ref`)
- Standalone `Ref` blocks
- Indexes and composite indexes
- Table notes and column notes
- Enums

## Notes

- DBML is designed for database documentation and diagramming
- Schema name defaults to `public`
- Relationship cardinality is preserved
96
pkg/readers/dctx/README.md
Normal file
96
pkg/readers/dctx/README.md
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
# DCTX Reader
|
||||||
|
|
||||||
|
Reads Clarion database dictionary (DCTX) files and extracts database schema information.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The DCTX Reader parses Clarion dictionary files (`.dctx`) that define database structures in the Clarion development system and converts them into RelSpec's internal database model representation.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Parses Clarion DCTX XML format
|
||||||
|
- Extracts file (table) and field (column) definitions
|
||||||
|
- Supports Clarion data types
|
||||||
|
- Handles keys (indexes) and relationships
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/readers/dctx"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
options := &readers.ReaderOptions{
|
||||||
|
FilePath: "/path/to/database.dctx",
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := dctx.NewReader(options)
|
||||||
|
db, err := reader.ReadDatabase()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Found %d schemas\n", len(db.Schemas))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Example
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Read DCTX file and convert to JSON
|
||||||
|
relspec --input dctx --in-file legacy.dctx --output json --out-file schema.json
|
||||||
|
|
||||||
|
# Convert DCTX to GORM models for migration
|
||||||
|
relspec --input dctx --in-file app.dctx --output gorm --out-file models.go
|
||||||
|
|
||||||
|
# Export DCTX to PostgreSQL DDL
|
||||||
|
relspec --input dctx --in-file database.dctx --output pgsql --out-file schema.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example DCTX Structure
|
||||||
|
|
||||||
|
DCTX files are XML-based Clarion dictionary files that define:
|
||||||
|
|
||||||
|
- Files (equivalent to tables)
|
||||||
|
- Fields (columns) with Clarion-specific types
|
||||||
|
- Keys (indexes)
|
||||||
|
- Relationships between files
|
||||||
|
|
||||||
|
Common Clarion data types:
|
||||||
|
- `STRING` - Fixed-length string
|
||||||
|
- `CSTRING` - C-style null-terminated string
|
||||||
|
- `LONG` - 32-bit integer
|
||||||
|
- `SHORT` - 16-bit integer
|
||||||
|
- `DECIMAL` - Decimal number
|
||||||
|
- `REAL` - Floating point
|
||||||
|
- `DATE` - Date field
|
||||||
|
- `TIME` - Time field
|
||||||
|
|
||||||
|
## Type Mapping
|
||||||
|
|
||||||
|
The reader automatically maps Clarion data types to standard SQL types:
|
||||||
|
|
||||||
|
| Clarion Type | SQL Type |
|
||||||
|
|--------------|----------|
|
||||||
|
| STRING | VARCHAR |
|
||||||
|
| CSTRING | VARCHAR |
|
||||||
|
| LONG | INTEGER |
|
||||||
|
| SHORT | SMALLINT |
|
||||||
|
| DECIMAL | NUMERIC |
|
||||||
|
| REAL | REAL |
|
||||||
|
| DATE | DATE |
|
||||||
|
| TIME | TIME |
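A minimal Go sketch of such a mapping (illustrative only; the function name and the fallback for unknown types are assumptions, not the reader's actual implementation):

```go
package example

import "strings"

// clarionTypeToSQL follows the table above; unknown Clarion types fall back to text (assumption).
func clarionTypeToSQL(clarionType string) string {
	switch strings.ToUpper(clarionType) {
	case "STRING", "CSTRING":
		return "varchar"
	case "LONG":
		return "integer"
	case "SHORT":
		return "smallint"
	case "DECIMAL":
		return "numeric"
	case "REAL":
		return "real"
	case "DATE":
		return "date"
	case "TIME":
		return "time"
	default:
		return "text"
	}
}
```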
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- DCTX is specific to the Clarion development platform
|
||||||
|
- Useful for migrating legacy Clarion applications
|
||||||
|
- Schema name defaults to `public`
|
||||||
|
- Preserves field properties and constraints where possible
|
||||||
96
pkg/readers/drawdb/README.md
Normal file
96
pkg/readers/drawdb/README.md
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
# DrawDB Reader
|
||||||
|
|
||||||
|
Reads DrawDB schema files and extracts database schema information.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The DrawDB Reader parses JSON files exported from DrawDB (a free online database design tool) and converts them into RelSpec's internal database model representation.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Parses DrawDB JSON format
|
||||||
|
- Extracts tables, fields, and relationships
|
||||||
|
- Supports DrawDB-specific metadata
|
||||||
|
- Preserves visual layout information
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/readers/drawdb"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
options := &readers.ReaderOptions{
|
||||||
|
FilePath: "/path/to/diagram.json",
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := drawdb.NewReader(options)
|
||||||
|
db, err := reader.ReadDatabase()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Found %d schemas\n", len(db.Schemas))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Example
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Read DrawDB export and convert to JSON schema
|
||||||
|
relspec --input drawdb --in-file diagram.json --output json --out-file schema.json
|
||||||
|
|
||||||
|
# Convert DrawDB design to GORM models
|
||||||
|
relspec --input drawdb --in-file design.json --output gorm --out-file models.go
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example DrawDB Export
|
||||||
|
|
||||||
|
DrawDB exports database designs as JSON files containing:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"tables": [
|
||||||
|
{
|
||||||
|
"id": "1",
|
||||||
|
"name": "users",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"name": "id",
|
||||||
|
"type": "BIGINT",
|
||||||
|
"primary": true,
|
||||||
|
"autoIncrement": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "username",
|
||||||
|
"type": "VARCHAR",
|
||||||
|
"size": 50,
|
||||||
|
"notNull": true,
|
||||||
|
"unique": true
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"relationships": [
|
||||||
|
{
|
||||||
|
"source": "posts",
|
||||||
|
"target": "users",
|
||||||
|
"type": "many-to-one"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
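A minimal sketch of Go structs that could decode an export shaped like the one above (struct and field names are illustrative assumptions, not the reader's internal types):

```go
package example

import "encoding/json"

// drawDBExport mirrors the JSON shape shown above.
type drawDBExport struct {
	Tables []struct {
		ID     string `json:"id"`
		Name   string `json:"name"`
		Fields []struct {
			Name          string `json:"name"`
			Type          string `json:"type"`
			Size          int    `json:"size"`
			Primary       bool   `json:"primary"`
			AutoIncrement bool   `json:"autoIncrement"`
			NotNull       bool   `json:"notNull"`
			Unique        bool   `json:"unique"`
		} `json:"fields"`
	} `json:"tables"`
	Relationships []struct {
		Source string `json:"source"`
		Target string `json:"target"`
		Type   string `json:"type"`
	} `json:"relationships"`
}

// decodeExport unmarshals a DrawDB export into the sketch structs above.
func decodeExport(data []byte) (*drawDBExport, error) {
	var export drawDBExport
	if err := json.Unmarshal(data, &export); err != nil {
		return nil, err
	}
	return &export, nil
}
```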
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- DrawDB is a free online database designer at drawdb.vercel.app
|
||||||
|
- Export format preserves visual design metadata
|
||||||
|
- Useful for converting visual designs to code
|
||||||
|
- Schema defaults to `public`
|
||||||
90
pkg/readers/drizzle/README.md
Normal file
90
pkg/readers/drizzle/README.md
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
# Drizzle Reader
|
||||||
|
|
||||||
|
Reads TypeScript/JavaScript files containing Drizzle ORM schema definitions and extracts database schema information.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The Drizzle Reader parses Drizzle ORM schema files (TypeScript/JavaScript) that define database tables using Drizzle's schema builder and converts them into RelSpec's internal database model representation.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Parses Drizzle schema definitions
|
||||||
|
- Extracts table, column, and relationship information
|
||||||
|
- Supports various Drizzle column types
|
||||||
|
- Handles constraints and indexes
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/readers/drizzle"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
options := &readers.ReaderOptions{
|
||||||
|
FilePath: "/path/to/schema.ts",
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := drizzle.NewReader(options)
|
||||||
|
db, err := reader.ReadDatabase()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Found %d schemas\n", len(db.Schemas))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Example
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Read Drizzle schema and convert to JSON
|
||||||
|
relspec --input drizzle --in-file schema.ts --output json --out-file schema.json
|
||||||
|
|
||||||
|
# Convert Drizzle to GORM models
|
||||||
|
relspec --input drizzle --in-file schema/ --output gorm --out-file models.go
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example Drizzle Schema
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
import { pgTable, serial, varchar, text, timestamp, integer } from 'drizzle-orm/pg-core';
|
||||||
|
import { relations } from 'drizzle-orm';
|
||||||
|
|
||||||
|
export const users = pgTable('users', {
|
||||||
|
id: serial('id').primaryKey(),
|
||||||
|
username: varchar('username', { length: 50 }).notNull().unique(),
|
||||||
|
email: varchar('email', { length: 100 }).notNull(),
|
||||||
|
createdAt: timestamp('created_at').notNull().defaultNow(),
|
||||||
|
});
|
||||||
|
|
||||||
|
export const posts = pgTable('posts', {
|
||||||
|
id: serial('id').primaryKey(),
|
||||||
|
userId: integer('user_id').notNull().references(() => users.id, { onDelete: 'cascade' }),
|
||||||
|
title: varchar('title', { length: 200 }).notNull(),
|
||||||
|
content: text('content'),
|
||||||
|
});
|
||||||
|
|
||||||
|
export const usersRelations = relations(users, ({ many }) => ({
|
||||||
|
posts: many(posts),
|
||||||
|
}));
|
||||||
|
|
||||||
|
export const postsRelations = relations(posts, ({ one }) => ({
|
||||||
|
user: one(users, {
|
||||||
|
fields: [posts.userId],
|
||||||
|
references: [users.id],
|
||||||
|
}),
|
||||||
|
}));
|
||||||
|
```
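For a schema like the one above, the parsed model can be inspected with a few lines of Go (a sketch reusing the reader API from the Basic Example, with the same imports plus `fmt` and `pkg/models`):

```go
	db, err := drizzle.NewReader(&readers.ReaderOptions{FilePath: "schema.ts"}).ReadDatabase()
	if err != nil {
		panic(err)
	}
	for _, table := range db.Schemas[0].Tables {
		fmt.Printf("table %s (%d columns)\n", table.Name, len(table.Columns))
		for _, c := range table.Constraints {
			if c.Type == models.ForeignKeyConstraint {
				// e.g. posts.user_id -> users(id), derived from the .references() call above
				fmt.Printf("  fk %s -> %s %v\n", c.Name, c.ReferencedTable, c.ReferencedColumns)
			}
		}
	}
```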
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- Supports both PostgreSQL and MySQL Drizzle schemas
|
||||||
|
- Extracts relationship information from `relations` definitions
|
||||||
|
- Schema defaults to `public` for PostgreSQL
|
||||||
619
pkg/readers/drizzle/reader.go
Normal file
619
pkg/readers/drizzle/reader.go
Normal file
@@ -0,0 +1,619 @@
|
|||||||
|
package drizzle
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reader implements the readers.Reader interface for Drizzle schema format
|
||||||
|
type Reader struct {
|
||||||
|
options *readers.ReaderOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReader creates a new Drizzle reader with the given options
|
||||||
|
func NewReader(options *readers.ReaderOptions) *Reader {
|
||||||
|
return &Reader{
|
||||||
|
options: options,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadDatabase reads and parses Drizzle schema input, returning a Database model
|
||||||
|
func (r *Reader) ReadDatabase() (*models.Database, error) {
|
||||||
|
if r.options.FilePath == "" {
|
||||||
|
return nil, fmt.Errorf("file path is required for Drizzle reader")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if it's a file or directory
|
||||||
|
info, err := os.Stat(r.options.FilePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to stat path: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.IsDir() {
|
||||||
|
// Read all .ts files in the directory
|
||||||
|
return r.readDirectory(r.options.FilePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read single file
|
||||||
|
content, err := os.ReadFile(r.options.FilePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.parseDrizzle(string(content))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadSchema reads and parses Drizzle schema input, returning a Schema model
|
||||||
|
func (r *Reader) ReadSchema() (*models.Schema, error) {
|
||||||
|
db, err := r.ReadDatabase()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(db.Schemas) == 0 {
|
||||||
|
return nil, fmt.Errorf("no schemas found in Drizzle schema")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the first schema
|
||||||
|
return db.Schemas[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadTable reads and parses Drizzle schema input, returning a Table model
|
||||||
|
func (r *Reader) ReadTable() (*models.Table, error) {
|
||||||
|
schema, err := r.ReadSchema()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(schema.Tables) == 0 {
|
||||||
|
return nil, fmt.Errorf("no tables found in Drizzle schema")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the first table
|
||||||
|
return schema.Tables[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readDirectory reads all .ts files in a directory and parses them
|
||||||
|
func (r *Reader) readDirectory(dirPath string) (*models.Database, error) {
|
||||||
|
db := models.InitDatabase("database")
|
||||||
|
|
||||||
|
if r.options.Metadata != nil {
|
||||||
|
if name, ok := r.options.Metadata["name"].(string); ok {
|
||||||
|
db.Name = name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default schema for Drizzle
|
||||||
|
schema := models.InitSchema("public")
|
||||||
|
schema.Enums = make([]*models.Enum, 0)
|
||||||
|
|
||||||
|
// Read all .ts files
|
||||||
|
files, err := filepath.Glob(filepath.Join(dirPath, "*.ts"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to glob directory: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse each file
|
||||||
|
for _, file := range files {
|
||||||
|
content, err := os.ReadFile(file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read file %s: %w", file, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse and merge into schema
|
||||||
|
fileDB, err := r.parseDrizzle(string(content))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse file %s: %w", file, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge schemas
|
||||||
|
if len(fileDB.Schemas) > 0 {
|
||||||
|
fileSchema := fileDB.Schemas[0]
|
||||||
|
schema.Tables = append(schema.Tables, fileSchema.Tables...)
|
||||||
|
schema.Enums = append(schema.Enums, fileSchema.Enums...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
db.Schemas = append(db.Schemas, schema)
|
||||||
|
return db, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseDrizzle parses Drizzle schema content and returns a Database model
|
||||||
|
func (r *Reader) parseDrizzle(content string) (*models.Database, error) {
|
||||||
|
db := models.InitDatabase("database")
|
||||||
|
|
||||||
|
if r.options.Metadata != nil {
|
||||||
|
if name, ok := r.options.Metadata["name"].(string); ok {
|
||||||
|
db.Name = name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default schema for Drizzle (PostgreSQL)
|
||||||
|
schema := models.InitSchema("public")
|
||||||
|
schema.Enums = make([]*models.Enum, 0)
|
||||||
|
db.DatabaseType = models.PostgresqlDatabaseType
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(strings.NewReader(content))
|
||||||
|
|
||||||
|
// Regex patterns
|
||||||
|
// Match: export const users = pgTable('users', {
|
||||||
|
pgTableRegex := regexp.MustCompile(`export\s+const\s+(\w+)\s*=\s*pgTable\s*\(\s*['"](\w+)['"]`)
|
||||||
|
// Match: export const userRole = pgEnum('UserRole', ['admin', 'user']);
|
||||||
|
pgEnumRegex := regexp.MustCompile(`export\s+const\s+(\w+)\s*=\s*pgEnum\s*\(\s*['"](\w+)['"]`)
|
||||||
|
|
||||||
|
// State tracking
|
||||||
|
var currentTable *models.Table
|
||||||
|
var currentTableVarName string
|
||||||
|
var inTableBlock bool
|
||||||
|
var blockDepth int
|
||||||
|
var tableLines []string
|
||||||
|
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := scanner.Text()
|
||||||
|
trimmed := strings.TrimSpace(line)
|
||||||
|
|
||||||
|
// Skip empty lines and comments
|
||||||
|
if trimmed == "" || strings.HasPrefix(trimmed, "//") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for pgEnum definition
|
||||||
|
if matches := pgEnumRegex.FindStringSubmatch(trimmed); matches != nil {
|
||||||
|
enum := r.parsePgEnum(trimmed, matches)
|
||||||
|
if enum != nil {
|
||||||
|
schema.Enums = append(schema.Enums, enum)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for pgTable definition
|
||||||
|
if matches := pgTableRegex.FindStringSubmatch(trimmed); matches != nil {
|
||||||
|
varName := matches[1]
|
||||||
|
tableName := matches[2]
|
||||||
|
|
||||||
|
currentTableVarName = varName
|
||||||
|
currentTable = models.InitTable(tableName, "public")
|
||||||
|
inTableBlock = true
|
||||||
|
// Count braces in the first line
|
||||||
|
blockDepth = strings.Count(line, "{") - strings.Count(line, "}")
|
||||||
|
tableLines = []string{line}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we're in a table block, accumulate lines
|
||||||
|
if inTableBlock {
|
||||||
|
tableLines = append(tableLines, line)
|
||||||
|
|
||||||
|
// Track brace depth
|
||||||
|
blockDepth += strings.Count(line, "{")
|
||||||
|
blockDepth -= strings.Count(line, "}")
|
||||||
|
|
||||||
|
// Check if we've closed the table definition
|
||||||
|
if blockDepth < 0 || (blockDepth == 0 && strings.Contains(line, ");")) {
|
||||||
|
// Parse the complete table block
|
||||||
|
if currentTable != nil {
|
||||||
|
r.parseTableBlock(tableLines, currentTable, currentTableVarName)
|
||||||
|
schema.Tables = append(schema.Tables, currentTable)
|
||||||
|
currentTable = nil
|
||||||
|
}
|
||||||
|
inTableBlock = false
|
||||||
|
tableLines = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
db.Schemas = append(db.Schemas, schema)
|
||||||
|
return db, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parsePgEnum parses a pgEnum definition
|
||||||
|
func (r *Reader) parsePgEnum(line string, matches []string) *models.Enum {
|
||||||
|
// matches[1] = variable name
|
||||||
|
// matches[2] = enum name
|
||||||
|
|
||||||
|
enumName := matches[2]
|
||||||
|
|
||||||
|
// Extract values from the array
|
||||||
|
// Example: pgEnum('UserRole', ['admin', 'user', 'guest'])
|
||||||
|
valuesRegex := regexp.MustCompile(`\[(.*?)\]`)
|
||||||
|
valuesMatch := valuesRegex.FindStringSubmatch(line)
|
||||||
|
if valuesMatch == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
valuesStr := valuesMatch[1]
|
||||||
|
// Split by comma and clean up
|
||||||
|
valueParts := strings.Split(valuesStr, ",")
|
||||||
|
values := make([]string, 0)
|
||||||
|
for _, part := range valueParts {
|
||||||
|
// Remove quotes and whitespace
|
||||||
|
cleaned := strings.TrimSpace(part)
|
||||||
|
cleaned = strings.Trim(cleaned, "'\"")
|
||||||
|
if cleaned != "" {
|
||||||
|
values = append(values, cleaned)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &models.Enum{
|
||||||
|
Name: enumName,
|
||||||
|
Values: values,
|
||||||
|
Schema: "public",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseTableBlock parses a complete pgTable definition block
|
||||||
|
func (r *Reader) parseTableBlock(lines []string, table *models.Table, tableVarName string) {
|
||||||
|
// Join all lines into a single string for easier parsing
|
||||||
|
fullText := strings.Join(lines, "\n")
|
||||||
|
|
||||||
|
// Extract the columns block and index callback separately
|
||||||
|
// The structure is: pgTable('name', { columns }, (table) => [indexes])
|
||||||
|
|
||||||
|
// Find the main object block (columns)
|
||||||
|
columnsStart := strings.Index(fullText, "{")
|
||||||
|
if columnsStart == -1 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find matching closing brace for columns
|
||||||
|
depth := 0
|
||||||
|
columnsEnd := -1
|
||||||
|
for i := columnsStart; i < len(fullText); i++ {
|
||||||
|
if fullText[i] == '{' {
|
||||||
|
depth++
|
||||||
|
} else if fullText[i] == '}' {
|
||||||
|
depth--
|
||||||
|
if depth == 0 {
|
||||||
|
columnsEnd = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if columnsEnd == -1 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
columnsBlock := fullText[columnsStart+1 : columnsEnd]
|
||||||
|
|
||||||
|
// Parse columns
|
||||||
|
r.parseColumnsBlock(columnsBlock, table, tableVarName)
|
||||||
|
|
||||||
|
// Check for index callback: , (table) => [ or , ({ col1, col2 }) => [
|
||||||
|
// Match: }, followed by arrow function with any parameters
|
||||||
|
// Use (?s) flag to make . match newlines
|
||||||
|
indexCallbackRegex := regexp.MustCompile(`(?s)}\s*,\s*\(.*?\)\s*=>\s*\[`)
|
||||||
|
if indexCallbackRegex.MatchString(fullText[columnsEnd:]) {
|
||||||
|
// Find the index array
|
||||||
|
indexStart := strings.Index(fullText[columnsEnd:], "[")
|
||||||
|
if indexStart != -1 {
|
||||||
|
indexStart += columnsEnd
|
||||||
|
indexDepth := 0
|
||||||
|
indexEnd := -1
|
||||||
|
for i := indexStart; i < len(fullText); i++ {
|
||||||
|
if fullText[i] == '[' {
|
||||||
|
indexDepth++
|
||||||
|
} else if fullText[i] == ']' {
|
||||||
|
indexDepth--
|
||||||
|
if indexDepth == 0 {
|
||||||
|
indexEnd = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if indexEnd != -1 {
|
||||||
|
indexBlock := fullText[indexStart+1 : indexEnd]
|
||||||
|
r.parseIndexBlock(indexBlock, table, tableVarName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseColumnsBlock parses the columns block of a table
|
||||||
|
func (r *Reader) parseColumnsBlock(block string, table *models.Table, tableVarName string) {
|
||||||
|
// Split by lines and parse each column definition
|
||||||
|
lines := strings.Split(block, "\n")
|
||||||
|
|
||||||
|
for _, line := range lines {
|
||||||
|
trimmed := strings.TrimSpace(line)
|
||||||
|
if trimmed == "" || strings.HasPrefix(trimmed, "//") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Match: fieldName: columnType('columnName').modifier().modifier(),
|
||||||
|
// Example: id: integer('id').primaryKey(),
|
||||||
|
columnRegex := regexp.MustCompile(`(\w+):\s*(\w+)\s*\(`)
|
||||||
|
matches := columnRegex.FindStringSubmatch(trimmed)
|
||||||
|
if matches == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldName := matches[1]
|
||||||
|
columnType := matches[2]
|
||||||
|
|
||||||
|
// Parse the column definition
|
||||||
|
col := r.parseColumnDefinition(trimmed, fieldName, columnType, table)
|
||||||
|
if col != nil {
|
||||||
|
table.Columns[col.Name] = col
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseColumnDefinition parses a single column definition line
|
||||||
|
func (r *Reader) parseColumnDefinition(line, fieldName, drizzleType string, table *models.Table) *models.Column {
|
||||||
|
// Check for enum column syntax: pgEnum('EnumName')('column_name')
|
||||||
|
enumRegex := regexp.MustCompile(`pgEnum\s*\(['"](\w+)['"]\)\s*\(['"](\w+)['"]\)`)
|
||||||
|
if enumMatch := enumRegex.FindStringSubmatch(line); enumMatch != nil {
|
||||||
|
enumName := enumMatch[1]
|
||||||
|
columnName := enumMatch[2]
|
||||||
|
|
||||||
|
column := models.InitColumn(columnName, table.Name, table.Schema)
|
||||||
|
column.Type = enumName
|
||||||
|
column.NotNull = false
|
||||||
|
|
||||||
|
// Parse modifiers
|
||||||
|
r.parseColumnModifiers(line, column, table)
|
||||||
|
return column
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract column name from the first argument
|
||||||
|
// Example: integer('id')
|
||||||
|
nameRegex := regexp.MustCompile(`\w+\s*\(['"](\w+)['"]\)`)
|
||||||
|
nameMatch := nameRegex.FindStringSubmatch(line)
|
||||||
|
if nameMatch == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
columnName := nameMatch[1]
|
||||||
|
column := models.InitColumn(columnName, table.Name, table.Schema)
|
||||||
|
|
||||||
|
// Map Drizzle type to SQL type
|
||||||
|
column.Type = r.drizzleTypeToSQL(drizzleType)
|
||||||
|
|
||||||
|
// Default: columns are nullable unless specified
|
||||||
|
column.NotNull = false
|
||||||
|
|
||||||
|
// Parse modifiers
|
||||||
|
r.parseColumnModifiers(line, column, table)
|
||||||
|
|
||||||
|
return column
|
||||||
|
}
|
||||||
|
|
||||||
|
// drizzleTypeToSQL converts Drizzle column types to SQL types
|
||||||
|
func (r *Reader) drizzleTypeToSQL(drizzleType string) string {
|
||||||
|
typeMap := map[string]string{
|
||||||
|
// Integer types
|
||||||
|
"integer": "integer",
|
||||||
|
"bigint": "bigint",
|
||||||
|
"smallint": "smallint",
|
||||||
|
|
||||||
|
// Serial types
|
||||||
|
"serial": "serial",
|
||||||
|
"bigserial": "bigserial",
|
||||||
|
"smallserial": "smallserial",
|
||||||
|
|
||||||
|
// Numeric types
|
||||||
|
"numeric": "numeric",
|
||||||
|
"real": "real",
|
||||||
|
"doublePrecision": "double precision",
|
||||||
|
|
||||||
|
// Character types
|
||||||
|
"text": "text",
|
||||||
|
"varchar": "varchar",
|
||||||
|
"char": "char",
|
||||||
|
|
||||||
|
// Boolean
|
||||||
|
"boolean": "boolean",
|
||||||
|
|
||||||
|
// Binary
|
||||||
|
"bytea": "bytea",
|
||||||
|
|
||||||
|
// JSON
|
||||||
|
"json": "json",
|
||||||
|
"jsonb": "jsonb",
|
||||||
|
|
||||||
|
// Date/Time
|
||||||
|
"time": "time",
|
||||||
|
"timestamp": "timestamp",
|
||||||
|
"date": "date",
|
||||||
|
"interval": "interval",
|
||||||
|
|
||||||
|
// UUID
|
||||||
|
"uuid": "uuid",
|
||||||
|
|
||||||
|
// Geometric
|
||||||
|
"point": "point",
|
||||||
|
"line": "line",
|
||||||
|
}
|
||||||
|
|
||||||
|
if sqlType, ok := typeMap[drizzleType]; ok {
|
||||||
|
return sqlType
|
||||||
|
}
|
||||||
|
|
||||||
|
// If not found, might be an enum - return as-is
|
||||||
|
return drizzleType
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseColumnModifiers parses column modifiers like .primaryKey(), .notNull(), etc.
|
||||||
|
func (r *Reader) parseColumnModifiers(line string, column *models.Column, table *models.Table) {
|
||||||
|
// Check for .primaryKey()
|
||||||
|
if strings.Contains(line, ".primaryKey()") {
|
||||||
|
column.IsPrimaryKey = true
|
||||||
|
column.NotNull = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for .notNull()
|
||||||
|
if strings.Contains(line, ".notNull()") {
|
||||||
|
column.NotNull = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for .unique()
|
||||||
|
if strings.Contains(line, ".unique()") {
|
||||||
|
uniqueConstraint := models.InitConstraint(
|
||||||
|
fmt.Sprintf("uq_%s", column.Name),
|
||||||
|
models.UniqueConstraint,
|
||||||
|
)
|
||||||
|
uniqueConstraint.Schema = table.Schema
|
||||||
|
uniqueConstraint.Table = table.Name
|
||||||
|
uniqueConstraint.Columns = []string{column.Name}
|
||||||
|
table.Constraints[uniqueConstraint.Name] = uniqueConstraint
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for .default(...)
|
||||||
|
// Need to handle nested backticks and parentheses in SQL expressions
|
||||||
|
defaultIdx := strings.Index(line, ".default(")
|
||||||
|
if defaultIdx != -1 {
|
||||||
|
start := defaultIdx + len(".default(")
|
||||||
|
depth := 1
|
||||||
|
inBacktick := false
|
||||||
|
i := start
|
||||||
|
|
||||||
|
for i < len(line) && depth > 0 {
|
||||||
|
ch := line[i]
|
||||||
|
if ch == '`' {
|
||||||
|
inBacktick = !inBacktick
|
||||||
|
} else if !inBacktick {
|
||||||
|
switch ch {
|
||||||
|
case '(':
|
||||||
|
depth++
|
||||||
|
case ')':
|
||||||
|
depth--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
|
||||||
|
if depth == 0 {
|
||||||
|
defaultValue := strings.TrimSpace(line[start : i-1])
|
||||||
|
r.parseDefaultValue(defaultValue, column)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for .generatedAlwaysAsIdentity()
|
||||||
|
if strings.Contains(line, ".generatedAlwaysAsIdentity()") {
|
||||||
|
column.AutoIncrement = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for .references(() => otherTable.column)
|
||||||
|
referencesRegex := regexp.MustCompile(`\.references\(\(\)\s*=>\s*(\w+)\.(\w+)\)`)
|
||||||
|
if matches := referencesRegex.FindStringSubmatch(line); matches != nil {
|
||||||
|
refTableVar := matches[1]
|
||||||
|
refColumn := matches[2]
|
||||||
|
|
||||||
|
// Create FK constraint
|
||||||
|
constraintName := fmt.Sprintf("fk_%s_%s", table.Name, column.Name)
|
||||||
|
constraint := models.InitConstraint(constraintName, models.ForeignKeyConstraint)
|
||||||
|
constraint.Schema = table.Schema
|
||||||
|
constraint.Table = table.Name
|
||||||
|
constraint.Columns = []string{column.Name}
|
||||||
|
constraint.ReferencedSchema = table.Schema // Assume same schema
|
||||||
|
constraint.ReferencedTable = r.varNameToTableName(refTableVar)
|
||||||
|
constraint.ReferencedColumns = []string{refColumn}
|
||||||
|
|
||||||
|
table.Constraints[constraint.Name] = constraint
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseDefaultValue parses a default value expression
|
||||||
|
func (r *Reader) parseDefaultValue(defaultExpr string, column *models.Column) {
|
||||||
|
defaultExpr = strings.TrimSpace(defaultExpr)
|
||||||
|
|
||||||
|
// Handle SQL expressions like sql`now()`
|
||||||
|
sqlRegex := regexp.MustCompile("sql`([^`]+)`")
|
||||||
|
if match := sqlRegex.FindStringSubmatch(defaultExpr); match != nil {
|
||||||
|
column.Default = match[1]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle boolean values
|
||||||
|
if defaultExpr == "true" {
|
||||||
|
column.Default = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if defaultExpr == "false" {
|
||||||
|
column.Default = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle string literals
|
||||||
|
if strings.HasPrefix(defaultExpr, "'") && strings.HasSuffix(defaultExpr, "'") {
|
||||||
|
column.Default = defaultExpr[1 : len(defaultExpr)-1]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(defaultExpr, "\"") && strings.HasSuffix(defaultExpr, "\"") {
|
||||||
|
column.Default = defaultExpr[1 : len(defaultExpr)-1]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise keep the raw expression (numeric literals and anything else)
|
||||||
|
column.Default = defaultExpr
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseIndexBlock parses the index callback block
|
||||||
|
func (r *Reader) parseIndexBlock(block string, table *models.Table, tableVarName string) {
|
||||||
|
// Split by lines
|
||||||
|
lines := strings.Split(block, "\n")
|
||||||
|
|
||||||
|
for _, line := range lines {
|
||||||
|
trimmed := strings.TrimSpace(line)
|
||||||
|
if trimmed == "" || strings.HasPrefix(trimmed, "//") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Match: index('index_name').on(table.col1, table.col2)
|
||||||
|
// or: uniqueIndex('index_name').on(table.col1, table.col2)
|
||||||
|
indexRegex := regexp.MustCompile(`(uniqueIndex|index)\s*\(['"](\w+)['"]\)\s*\.on\s*\((.*?)\)`)
|
||||||
|
matches := indexRegex.FindStringSubmatch(trimmed)
|
||||||
|
if matches == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
indexType := matches[1]
|
||||||
|
indexName := matches[2]
|
||||||
|
columnsStr := matches[3]
|
||||||
|
|
||||||
|
// Parse column list
|
||||||
|
columnParts := strings.Split(columnsStr, ",")
|
||||||
|
columns := make([]string, 0)
|
||||||
|
for _, part := range columnParts {
|
||||||
|
// Remove table prefix: table.column -> column
|
||||||
|
cleaned := strings.TrimSpace(part)
|
||||||
|
if strings.Contains(cleaned, ".") {
|
||||||
|
parts := strings.Split(cleaned, ".")
|
||||||
|
cleaned = parts[len(parts)-1]
|
||||||
|
}
|
||||||
|
columns = append(columns, cleaned)
|
||||||
|
}
|
||||||
|
|
||||||
|
if indexType == "uniqueIndex" {
|
||||||
|
// Create unique constraint
|
||||||
|
constraint := models.InitConstraint(indexName, models.UniqueConstraint)
|
||||||
|
constraint.Schema = table.Schema
|
||||||
|
constraint.Table = table.Name
|
||||||
|
constraint.Columns = columns
|
||||||
|
table.Constraints[constraint.Name] = constraint
|
||||||
|
} else {
|
||||||
|
// Create index
|
||||||
|
index := models.InitIndex(indexName, table.Name, table.Schema)
|
||||||
|
index.Columns = columns
|
||||||
|
index.Unique = false
|
||||||
|
table.Indexes[index.Name] = index
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// varNameToTableName converts a variable name to a table name
|
||||||
|
// For now, just return as-is (could add inflection later)
|
||||||
|
func (r *Reader) varNameToTableName(varName string) string {
|
||||||
|
// TODO: Could add conversion logic here if needed
|
||||||
|
// For now, assume variable name matches table name
|
||||||
|
return varName
|
||||||
|
}
|
||||||
141
pkg/readers/gorm/README.md
Normal file
141
pkg/readers/gorm/README.md
Normal file
@@ -0,0 +1,141 @@
|
|||||||
|
# GORM Reader
|
||||||
|
|
||||||
|
Reads Go source files containing GORM model definitions and extracts database schema information.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The GORM Reader parses Go source code files that define GORM models (structs with `gorm` struct tags) and converts them into RelSpec's internal database model representation. It supports reading from individual files or entire directories.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Parses GORM struct tags to extract column definitions
|
||||||
|
- Extracts table names from `TableName()` methods
|
||||||
|
- Identifies primary keys, foreign keys, and indexes
|
||||||
|
- Supports relationship detection (has-many, belongs-to)
|
||||||
|
- Handles both single files and directories
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/readers/gorm"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Read from a single file
|
||||||
|
options := &readers.ReaderOptions{
|
||||||
|
FilePath: "/path/to/models.go",
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := gorm.NewReader(options)
|
||||||
|
db, err := reader.ReadDatabase()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Found %d schemas\n", len(db.Schemas))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Reading from Directory
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Read all .go files from a directory
|
||||||
|
options := &readers.ReaderOptions{
|
||||||
|
FilePath: "/path/to/models/",
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := gorm.NewReader(options)
|
||||||
|
db, err := reader.ReadDatabase()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Example
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Read GORM models and convert to JSON
|
||||||
|
relspec --input gorm --in-file models/ --output json --out-file schema.json
|
||||||
|
|
||||||
|
# Convert GORM models to Bun
|
||||||
|
relspec --input gorm --in-file models.go --output bun --out-file bun_models.go
|
||||||
|
```
|
||||||
|
|
||||||
|
## Supported GORM Tags
|
||||||
|
|
||||||
|
The reader recognizes the following GORM struct tags:
|
||||||
|
|
||||||
|
- `column` - Column name
|
||||||
|
- `type` - SQL data type (e.g., `varchar(255)`, `bigint`)
|
||||||
|
- `primaryKey` or `primary_key` - Mark as primary key
|
||||||
|
- `not null` - NOT NULL constraint
|
||||||
|
- `autoIncrement` - Auto-increment column
|
||||||
|
- `default` - Default value
|
||||||
|
- `size` - Column size/length
|
||||||
|
- `index` - Create index
|
||||||
|
- `uniqueIndex` - Create unique index
|
||||||
|
- `unique` - Unique constraint
|
||||||
|
- `foreignKey` - Foreign key column
|
||||||
|
- `references` - Referenced column
|
||||||
|
- `constraint` - Constraint behavior (OnDelete, OnUpdate)
|
||||||
|
|
||||||
|
## Example GORM Model
|
||||||
|
|
||||||
|
```go
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ModelUser struct {
|
||||||
|
gorm.Model
|
||||||
|
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement"`
|
||||||
|
Username string `gorm:"column:username;type:varchar(50);not null;uniqueIndex"`
|
||||||
|
Email string `gorm:"column:email;type:varchar(100);not null"`
|
||||||
|
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;not null;default:now()"`
|
||||||
|
|
||||||
|
// Relationships
|
||||||
|
Posts []*ModelPost `gorm:"foreignKey:UserID;references:ID;constraint:OnDelete:CASCADE"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ModelUser) TableName() string {
|
||||||
|
return "public.users"
|
||||||
|
}
|
||||||
|
|
||||||
|
type ModelPost struct {
|
||||||
|
ID int64 `gorm:"column:id;type:bigint;primaryKey"`
|
||||||
|
UserID int64 `gorm:"column:user_id;type:bigint;not null"`
|
||||||
|
Title string `gorm:"column:title;type:varchar(200);not null"`
|
||||||
|
Content string `gorm:"column:content;type:text"`
|
||||||
|
|
||||||
|
// Belongs-to relationship
|
||||||
|
User *ModelUser `gorm:"foreignKey:UserID;references:ID"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ModelPost) TableName() string {
|
||||||
|
return "public.posts"
|
||||||
|
}
|
||||||
|
```
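Reading the models above produces a `public` schema with `users` and `posts` tables, and the relationship tags become foreign key constraints. A short sketch of inspecting them (same API as the Basic Example, plus `pkg/models`):

```go
	db, err := gorm.NewReader(&readers.ReaderOptions{FilePath: "models.go"}).ReadDatabase()
	if err != nil {
		panic(err)
	}
	for _, table := range db.Schemas[0].Tables {
		for _, c := range table.Constraints {
			if c.Type == models.ForeignKeyConstraint {
				// e.g. posts.user_id -> users(id) with ON DELETE CASCADE from the constraint tag
				fmt.Println(c.Table, c.Columns, "->", c.ReferencedTable, c.ReferencedColumns, c.OnDelete)
			}
		}
	}
```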
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- Test files (ending in `_test.go`) are automatically excluded
|
||||||
|
- The `gorm.Model` embedded struct is automatically recognized and skipped
|
||||||
|
- Table names are derived from struct names if a `TableName()` method is not present
|
||||||
|
- Schema defaults to `public` if not specified in `TableName()`
|
||||||
|
- Relationships are inferred from GORM relationship tags
|
||||||
|
|
||||||
|
## Limitations
|
||||||
|
|
||||||
|
- Complex relationship types (many-to-many with join tables) may need manual verification
|
||||||
|
- Custom GORM types may not be fully supported
|
||||||
|
- Some advanced GORM features may not be captured
|
||||||
@@ -693,7 +693,7 @@ func (r *Reader) deriveTableName(structName string) string {
|
|||||||
|
|
||||||
// parseColumn parses a struct field into a Column model
|
// parseColumn parses a struct field into a Column model
|
||||||
// Returns the column and any inline reference information (e.g., "mainaccount(id_mainaccount)")
|
// Returns the column and any inline reference information (e.g., "mainaccount(id_mainaccount)")
|
||||||
func (r *Reader) parseColumn(fieldName string, fieldType ast.Expr, tag string, sequence uint) (*models.Column, string) {
|
func (r *Reader) parseColumn(fieldName string, fieldType ast.Expr, tag string, sequence uint) (col *models.Column, ref string) {
|
||||||
// Extract gorm tag
|
// Extract gorm tag
|
||||||
gormTag := r.extractGormTag(tag)
|
gormTag := r.extractGormTag(tag)
|
||||||
if gormTag == "" {
|
if gormTag == "" {
|
||||||
@@ -756,20 +756,14 @@ func (r *Reader) parseColumn(fieldName string, fieldType ast.Expr, tag string, s
|
|||||||
// - explicit "not null" tag means NOT NULL
|
// - explicit "not null" tag means NOT NULL
|
||||||
// - absence of "not null" tag with sql_types means nullable
|
// - absence of "not null" tag with sql_types means nullable
|
||||||
// - primitive types (string, int64, bool) default to NOT NULL unless explicitly nullable
|
// - primitive types (string, int64, bool) default to NOT NULL unless explicitly nullable
|
||||||
|
// Primary keys are always NOT NULL
|
||||||
|
column.NotNull = false
|
||||||
if _, hasNotNull := parts["not null"]; hasNotNull {
|
if _, hasNotNull := parts["not null"]; hasNotNull {
|
||||||
column.NotNull = true
|
column.NotNull = true
|
||||||
} else {
|
} else {
|
||||||
// If no explicit "not null" tag, check the Go type
|
// sql_types.SqlString, etc. are nullable by default
|
||||||
if r.isNullableGoType(fieldType) {
|
column.NotNull = !r.isNullableGoType(fieldType)
|
||||||
// sql_types.SqlString, etc. are nullable by default
|
|
||||||
column.NotNull = false
|
|
||||||
} else {
|
|
||||||
// Primitive types default to NOT NULL
|
|
||||||
column.NotNull = false // Default to nullable unless explicitly set
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Primary keys are always NOT NULL
|
|
||||||
if column.IsPrimaryKey {
|
if column.IsPrimaryKey {
|
||||||
column.NotNull = true
|
column.NotNull = true
|
||||||
}
|
}
|
||||||
|
|||||||
464
pkg/readers/gorm/reader_test.go
Normal file
464
pkg/readers/gorm/reader_test.go
Normal file
@@ -0,0 +1,464 @@
|
|||||||
|
package gorm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestReader_ReadDatabase_Simple(t *testing.T) {
|
||||||
|
opts := &readers.ReaderOptions{
|
||||||
|
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "gorm", "simple.go"),
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := NewReader(opts)
|
||||||
|
db, err := reader.ReadDatabase()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("ReadDatabase() error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if db == nil {
|
||||||
|
t.Fatal("ReadDatabase() returned nil database")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(db.Schemas) == 0 {
|
||||||
|
t.Fatal("Expected at least one schema")
|
||||||
|
}
|
||||||
|
|
||||||
|
schema := db.Schemas[0]
|
||||||
|
if schema.Name != "public" {
|
||||||
|
t.Errorf("Expected schema name 'public', got '%s'", schema.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(schema.Tables) != 1 {
|
||||||
|
t.Fatalf("Expected 1 table, got %d", len(schema.Tables))
|
||||||
|
}
|
||||||
|
|
||||||
|
table := schema.Tables[0]
|
||||||
|
if table.Name != "users" {
|
||||||
|
t.Errorf("Expected table name 'users', got '%s'", table.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(table.Columns) != 6 {
|
||||||
|
t.Errorf("Expected 6 columns, got %d", len(table.Columns))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify id column - primary key should be NOT NULL
|
||||||
|
idCol, exists := table.Columns["id"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'id' not found")
|
||||||
|
}
|
||||||
|
if !idCol.IsPrimaryKey {
|
||||||
|
t.Error("Column 'id' should be primary key")
|
||||||
|
}
|
||||||
|
if !idCol.AutoIncrement {
|
||||||
|
t.Error("Column 'id' should be auto-increment")
|
||||||
|
}
|
||||||
|
if !idCol.NotNull {
|
||||||
|
t.Error("Column 'id' should be NOT NULL (primary keys are always NOT NULL)")
|
||||||
|
}
|
||||||
|
if idCol.Type != "bigint" {
|
||||||
|
t.Errorf("Expected id type 'bigint', got '%s'", idCol.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify email column - explicit "not null" tag should be NOT NULL
|
||||||
|
emailCol, exists := table.Columns["email"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'email' not found")
|
||||||
|
}
|
||||||
|
if !emailCol.NotNull {
|
||||||
|
t.Error("Column 'email' should be NOT NULL (explicit 'not null' tag)")
|
||||||
|
}
|
||||||
|
if emailCol.Type != "varchar" || emailCol.Length != 255 {
|
||||||
|
t.Errorf("Expected email type 'varchar(255)', got '%s' with length %d", emailCol.Type, emailCol.Length)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify name column - primitive string type should be NOT NULL by default
|
||||||
|
nameCol, exists := table.Columns["name"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'name' not found")
|
||||||
|
}
|
||||||
|
if !nameCol.NotNull {
|
||||||
|
t.Error("Column 'name' should be NOT NULL (primitive string type defaults to NOT NULL)")
|
||||||
|
}
|
||||||
|
if nameCol.Type != "text" {
|
||||||
|
t.Errorf("Expected name type 'text', got '%s'", nameCol.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify age column - pointer type should be nullable (NOT NULL = false)
|
||||||
|
ageCol, exists := table.Columns["age"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'age' not found")
|
||||||
|
}
|
||||||
|
if ageCol.NotNull {
|
||||||
|
t.Error("Column 'age' should be nullable (pointer type *int)")
|
||||||
|
}
|
||||||
|
if ageCol.Type != "integer" {
|
||||||
|
t.Errorf("Expected age type 'integer', got '%s'", ageCol.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify is_active column - primitive bool type should be NOT NULL by default
|
||||||
|
isActiveCol, exists := table.Columns["is_active"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'is_active' not found")
|
||||||
|
}
|
||||||
|
if !isActiveCol.NotNull {
|
||||||
|
t.Error("Column 'is_active' should be NOT NULL (primitive bool type defaults to NOT NULL)")
|
||||||
|
}
|
||||||
|
if isActiveCol.Type != "boolean" {
|
||||||
|
t.Errorf("Expected is_active type 'boolean', got '%s'", isActiveCol.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify created_at column - time.Time should be NOT NULL by default
|
||||||
|
createdAtCol, exists := table.Columns["created_at"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'created_at' not found")
|
||||||
|
}
|
||||||
|
if !createdAtCol.NotNull {
|
||||||
|
t.Error("Column 'created_at' should be NOT NULL (time.Time is NOT NULL by default)")
|
||||||
|
}
|
||||||
|
if createdAtCol.Type != "timestamp" {
|
||||||
|
t.Errorf("Expected created_at type 'timestamp', got '%s'", createdAtCol.Type)
|
||||||
|
}
|
||||||
|
if createdAtCol.Default != "now()" {
|
||||||
|
t.Errorf("Expected created_at default 'now()', got '%v'", createdAtCol.Default)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReader_ReadDatabase_Complex(t *testing.T) {
|
||||||
|
opts := &readers.ReaderOptions{
|
||||||
|
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "gorm", "complex.go"),
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := NewReader(opts)
|
||||||
|
db, err := reader.ReadDatabase()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("ReadDatabase() error = %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if db == nil {
|
||||||
|
t.Fatal("ReadDatabase() returned nil database")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify schema
|
||||||
|
if len(db.Schemas) != 1 {
|
||||||
|
t.Fatalf("Expected 1 schema, got %d", len(db.Schemas))
|
||||||
|
}
|
||||||
|
|
||||||
|
schema := db.Schemas[0]
|
||||||
|
if schema.Name != "public" {
|
||||||
|
t.Errorf("Expected schema name 'public', got '%s'", schema.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify tables
|
||||||
|
if len(schema.Tables) != 3 {
|
||||||
|
t.Fatalf("Expected 3 tables, got %d", len(schema.Tables))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find tables
|
||||||
|
var usersTable, postsTable, commentsTable *models.Table
|
||||||
|
for _, table := range schema.Tables {
|
||||||
|
switch table.Name {
|
||||||
|
case "users":
|
||||||
|
usersTable = table
|
||||||
|
case "posts":
|
||||||
|
postsTable = table
|
||||||
|
case "comments":
|
||||||
|
commentsTable = table
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if usersTable == nil {
|
||||||
|
t.Fatal("Users table not found")
|
||||||
|
}
|
||||||
|
if postsTable == nil {
|
||||||
|
t.Fatal("Posts table not found")
|
||||||
|
}
|
||||||
|
if commentsTable == nil {
|
||||||
|
t.Fatal("Comments table not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify users table - test NOT NULL logic for various field types
|
||||||
|
if len(usersTable.Columns) != 10 {
|
||||||
|
t.Errorf("Expected 10 columns in users table, got %d", len(usersTable.Columns))
|
||||||
|
}
|
||||||
|
|
||||||
|
// username - NOT NULL (explicit tag)
|
||||||
|
usernameCol, exists := usersTable.Columns["username"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'username' not found")
|
||||||
|
}
|
||||||
|
if !usernameCol.NotNull {
|
||||||
|
t.Error("Column 'username' should be NOT NULL (explicit 'not null' tag)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// first_name - nullable (pointer type)
|
||||||
|
firstNameCol, exists := usersTable.Columns["first_name"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'first_name' not found")
|
||||||
|
}
|
||||||
|
if firstNameCol.NotNull {
|
||||||
|
t.Error("Column 'first_name' should be nullable (pointer type *string)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// last_name - nullable (pointer type)
|
||||||
|
lastNameCol, exists := usersTable.Columns["last_name"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'last_name' not found")
|
||||||
|
}
|
||||||
|
if lastNameCol.NotNull {
|
||||||
|
t.Error("Column 'last_name' should be nullable (pointer type *string)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// bio - nullable (pointer type)
|
||||||
|
bioCol, exists := usersTable.Columns["bio"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'bio' not found")
|
||||||
|
}
|
||||||
|
if bioCol.NotNull {
|
||||||
|
t.Error("Column 'bio' should be nullable (pointer type *string)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// is_active - NOT NULL (primitive bool)
|
||||||
|
isActiveCol, exists := usersTable.Columns["is_active"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'is_active' not found")
|
||||||
|
}
|
||||||
|
if !isActiveCol.NotNull {
|
||||||
|
t.Error("Column 'is_active' should be NOT NULL (primitive bool type)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify users table indexes
|
||||||
|
if len(usersTable.Indexes) < 1 {
|
||||||
|
t.Error("Expected at least 1 index on users table")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify posts table
|
||||||
|
if len(postsTable.Columns) != 11 {
|
||||||
|
t.Errorf("Expected 11 columns in posts table, got %d", len(postsTable.Columns))
|
||||||
|
}
|
||||||
|
|
||||||
|
// excerpt - nullable (pointer type)
|
||||||
|
excerptCol, exists := postsTable.Columns["excerpt"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'excerpt' not found")
|
||||||
|
}
|
||||||
|
if excerptCol.NotNull {
|
||||||
|
t.Error("Column 'excerpt' should be nullable (pointer type *string)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// published - NOT NULL (primitive bool with default)
|
||||||
|
publishedCol, exists := postsTable.Columns["published"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'published' not found")
|
||||||
|
}
|
||||||
|
if !publishedCol.NotNull {
|
||||||
|
t.Error("Column 'published' should be NOT NULL (primitive bool type)")
|
||||||
|
}
|
||||||
|
if publishedCol.Default != "false" {
|
||||||
|
t.Errorf("Expected published default 'false', got '%v'", publishedCol.Default)
|
||||||
|
}
|
||||||
|
|
||||||
|
// published_at - nullable (pointer to time.Time)
|
||||||
|
publishedAtCol, exists := postsTable.Columns["published_at"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'published_at' not found")
|
||||||
|
}
|
||||||
|
if publishedAtCol.NotNull {
|
||||||
|
t.Error("Column 'published_at' should be nullable (pointer type *time.Time)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// view_count - NOT NULL (primitive int64 with default)
|
||||||
|
viewCountCol, exists := postsTable.Columns["view_count"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'view_count' not found")
|
||||||
|
}
|
||||||
|
if !viewCountCol.NotNull {
|
||||||
|
t.Error("Column 'view_count' should be NOT NULL (primitive int64 type)")
|
||||||
|
}
|
||||||
|
if viewCountCol.Default != "0" {
|
||||||
|
t.Errorf("Expected view_count default '0', got '%v'", viewCountCol.Default)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify posts table indexes
|
||||||
|
if len(postsTable.Indexes) < 1 {
|
||||||
|
t.Error("Expected at least 1 index on posts table")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify comments table
|
||||||
|
if len(commentsTable.Columns) != 6 {
|
||||||
|
t.Errorf("Expected 6 columns in comments table, got %d", len(commentsTable.Columns))
|
||||||
|
}
|
||||||
|
|
||||||
|
// user_id - nullable (pointer type)
|
||||||
|
userIDCol, exists := commentsTable.Columns["user_id"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'user_id' not found in comments table")
|
||||||
|
}
|
||||||
|
if userIDCol.NotNull {
|
||||||
|
t.Error("Column 'user_id' should be nullable (pointer type *int64)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// post_id - NOT NULL (explicit tag)
|
||||||
|
postIDCol, exists := commentsTable.Columns["post_id"]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("Column 'post_id' not found in comments table")
|
||||||
|
}
|
||||||
|
if !postIDCol.NotNull {
|
||||||
|
t.Error("Column 'post_id' should be NOT NULL (explicit 'not null' tag)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify foreign key constraints
|
||||||
|
if len(postsTable.Constraints) == 0 {
|
||||||
|
t.Error("Expected at least one constraint on posts table")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find FK constraint to users
|
||||||
|
var fkPostsUser *models.Constraint
|
||||||
|
for _, c := range postsTable.Constraints {
|
||||||
|
if c.Type == models.ForeignKeyConstraint && c.ReferencedTable == "users" {
|
||||||
|
fkPostsUser = c
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if fkPostsUser != nil {
|
||||||
|
if fkPostsUser.OnDelete != "CASCADE" {
|
||||||
|
t.Errorf("Expected ON DELETE CASCADE for posts->users FK, got '%s'", fkPostsUser.OnDelete)
|
||||||
|
}
|
||||||
|
if fkPostsUser.OnUpdate != "CASCADE" {
|
||||||
|
t.Errorf("Expected ON UPDATE CASCADE for posts->users FK, got '%s'", fkPostsUser.OnUpdate)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify comments table constraints
|
||||||
|
if len(commentsTable.Constraints) == 0 {
|
||||||
|
t.Error("Expected at least one constraint on comments table")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find FK constraints
|
||||||
|
var fkCommentsPost, fkCommentsUser *models.Constraint
|
||||||
|
for _, c := range commentsTable.Constraints {
|
||||||
|
if c.Type == models.ForeignKeyConstraint {
|
||||||
|
			if c.ReferencedTable == "posts" {
				fkCommentsPost = c
			} else if c.ReferencedTable == "users" {
				fkCommentsUser = c
			}
		}
	}

	if fkCommentsPost != nil {
		if fkCommentsPost.OnDelete != "CASCADE" {
			t.Errorf("Expected ON DELETE CASCADE for comments->posts FK, got '%s'", fkCommentsPost.OnDelete)
		}
	}

	if fkCommentsUser != nil {
		if fkCommentsUser.OnDelete != "SET NULL" {
			t.Errorf("Expected ON DELETE SET NULL for comments->users FK, got '%s'", fkCommentsUser.OnDelete)
		}
	}
}

func TestReader_ReadSchema(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "gorm", "simple.go"),
	}

	reader := NewReader(opts)
	schema, err := reader.ReadSchema()
	if err != nil {
		t.Fatalf("ReadSchema() error = %v", err)
	}

	if schema == nil {
		t.Fatal("ReadSchema() returned nil schema")
	}

	if schema.Name != "public" {
		t.Errorf("Expected schema name 'public', got '%s'", schema.Name)
	}

	if len(schema.Tables) != 1 {
		t.Errorf("Expected 1 table, got %d", len(schema.Tables))
	}
}

func TestReader_ReadTable(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "gorm", "simple.go"),
	}

	reader := NewReader(opts)
	table, err := reader.ReadTable()
	if err != nil {
		t.Fatalf("ReadTable() error = %v", err)
	}

	if table == nil {
		t.Fatal("ReadTable() returned nil table")
	}

	if table.Name != "users" {
		t.Errorf("Expected table name 'users', got '%s'", table.Name)
	}

	if len(table.Columns) != 6 {
		t.Errorf("Expected 6 columns, got %d", len(table.Columns))
	}
}

func TestReader_ReadDatabase_Directory(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "gorm"),
	}

	reader := NewReader(opts)
	db, err := reader.ReadDatabase()
	if err != nil {
		t.Fatalf("ReadDatabase() error = %v", err)
	}

	if db == nil {
		t.Fatal("ReadDatabase() returned nil database")
	}

	// Should read both simple.go and complex.go
	if len(db.Schemas) == 0 {
		t.Fatal("Expected at least one schema")
	}

	schema := db.Schemas[0]
	// Should have at least 3 tables from complex.go (users, posts, comments)
	// plus 1 from simple.go (users) - but same table name, so may be overwritten
	if len(schema.Tables) < 3 {
		t.Errorf("Expected at least 3 tables, got %d", len(schema.Tables))
	}
}

func TestReader_ReadDatabase_InvalidPath(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: "/nonexistent/file.go",
	}

	reader := NewReader(opts)
	_, err := reader.ReadDatabase()
	if err == nil {
		t.Error("Expected error for invalid file path")
	}
}

func TestReader_ReadDatabase_EmptyPath(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: "",
	}

	reader := NewReader(opts)
	_, err := reader.ReadDatabase()
	if err == nil {
		t.Error("Expected error for empty file path")
	}
}
203
pkg/readers/graphql/README.md
Normal file
@@ -0,0 +1,203 @@
# GraphQL Schema Reader

The GraphQL reader parses GraphQL Schema Definition Language (SDL) files and converts them into RelSpec's internal database model.

## Features

- **Standard GraphQL SDL** support (generic, non-framework-specific)
- **Type to Table mapping**: GraphQL types become database tables
- **Field to Column mapping**: GraphQL fields become table columns
- **Enum support**: GraphQL enums are preserved
- **Custom scalars**: DateTime, JSON, Date automatically mapped to appropriate SQL types
- **Implicit relationships**: Detects relationships from field types
- **Many-to-many support**: Creates junction tables for bidirectional array relationships
- **Configurable ID mapping**: Choose between bigint (default) or UUID for ID fields

## Supported GraphQL Features

### Built-in Scalars
- `ID` → bigint (default) or uuid (configurable)
- `String` → text
- `Int` → integer
- `Float` → double precision
- `Boolean` → boolean

### Custom Scalars
- `DateTime` → timestamp
- `JSON` → jsonb
- `Date` → date
- `Time` → time
- `Decimal` → numeric

Additional custom scalars can be mapped via metadata.

### Relationships

Relationships are inferred from field types:

```graphql
type Post {
  id: ID!
  title: String!
  author: User!    # Many-to-one (creates authorId FK column, NOT NULL)
  reviewer: User   # Many-to-one nullable (creates reviewerId FK column, NULL)
  tags: [Tag!]!    # One-to-many or many-to-many (depending on reverse)
}

type User {
  id: ID!
  posts: [Post!]!  # Reverse of Post.author (no FK created)
}

type Tag {
  id: ID!
  posts: [Post!]!  # Many-to-many with Post (creates PostTag junction table)
}
```

**Relationship Detection Rules:**
- Single type reference (`user: User`) → Creates FK column (e.g., `userId`)
- Array type reference (`posts: [Post!]!`) → One-to-many reverse (no FK on this table)
- Bidirectional arrays → Many-to-many (creates junction table)
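
As a minimal sketch (assuming the `Post`/`User` schema above has already been read into `db` as shown under Usage below), the inferred FK column and constraint can be inspected on the parsed model:

```go
// Locate the Post table and check the FK inferred from `author: User!`.
var postTable *models.Table
for _, t := range db.Schemas[0].Tables {
	if t.Name == "Post" {
		postTable = t
	}
}

if col, ok := postTable.Columns["authorId"]; ok {
	fmt.Println(col.Type, col.NotNull) // e.g. "bigint", true
}

for _, c := range postTable.Constraints {
	if c.Type == models.ForeignKeyConstraint {
		fmt.Println(c.ReferencedTable, c.OnDelete) // "User", "CASCADE"
	}
}
```
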
### Enums

```graphql
enum Role {
  ADMIN
  USER
  GUEST
}

type User {
  role: Role!
}
```

Enums are preserved in the schema and can be used as column types.
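
A minimal sketch of accessing the parsed enum (assuming the schema has been read into `db` as shown under Usage below):

```go
for _, e := range db.Schemas[0].Enums {
	fmt.Println(e.Name, e.Values) // e.g. "Role" [ADMIN USER GUEST]
}
```
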
## Usage

### Basic Usage

```go
import (
	"git.warky.dev/wdevs/relspecgo/pkg/readers"
	"git.warky.dev/wdevs/relspecgo/pkg/readers/graphql"
)

opts := &readers.ReaderOptions{
	FilePath: "schema.graphql",
}

reader := graphql.NewReader(opts)
db, err := reader.ReadDatabase()
```

### With UUID ID Type

```go
opts := &readers.ReaderOptions{
	FilePath: "schema.graphql",
	Metadata: map[string]interface{}{
		"idType": "uuid", // Map ID scalar to uuid instead of bigint
	},
}

reader := graphql.NewReader(opts)
db, err := reader.ReadDatabase()
```

### With Per-Type ID Mapping

```go
opts := &readers.ReaderOptions{
	FilePath: "schema.graphql",
	Metadata: map[string]interface{}{
		"typeIdMappings": map[string]string{
			"User": "uuid",   // User.id → uuid
			"Post": "bigint", // Post.id → bigint
		},
	},
}
```

### With Custom Scalar Mappings

```go
opts := &readers.ReaderOptions{
	FilePath: "schema.graphql",
	Metadata: map[string]interface{}{
		"customScalarMappings": map[string]string{
			"Upload":  "bytea",
			"Decimal": "numeric(10,2)",
		},
	},
}
```

## CLI Usage

```bash
# Convert GraphQL to JSON
relspec convert --from graphql --from-path schema.graphql \
    --to json --to-path schema.json

# Convert GraphQL to GORM models
relspec convert --from graphql --from-path schema.graphql \
    --to gorm --to-path models/ --package models

# Convert GraphQL to PostgreSQL SQL
relspec convert --from graphql --from-path schema.graphql \
    --to pgsql --to-path schema.sql
```

## Metadata Options

| Option | Type | Description | Default |
|--------|------|-------------|---------|
| `idType` | string | Global ID type mapping ("bigint" or "uuid") | "bigint" |
| `typeIdMappings` | map[string]string | Per-type ID mappings | {} |
| `customScalarMappings` | map[string]string | Custom scalar to SQL type mappings | {} |
| `schemaName` | string | Schema name for all tables | "public" |

## Limitations

- Only supports GraphQL SDL (Schema Definition Language), not queries or mutations
- Directives are ignored (except for future extensibility)
- Interfaces and Unions are not supported
- GraphQL's concept of "schema" is different from database schemas; all types go into a single database schema (default: "public")

## Example

**Input** (`schema.graphql`):
```graphql
scalar DateTime

enum Role {
  ADMIN
  USER
}

type User {
  id: ID!
  email: String!
  role: Role!
  createdAt: DateTime!
  posts: [Post!]!
}

type Post {
  id: ID!
  title: String!
  content: String
  published: Boolean!
  author: User!
}
```

**Result**: Database with:
- 2 tables: `User` and `Post`
- `Post` table has `authorId` foreign key to `User.id`
- `Role` enum with values: ADMIN, USER
- Custom scalar `DateTime` mapped to `timestamp`
279
pkg/readers/graphql/reader.go
Normal file
@@ -0,0 +1,279 @@
package graphql

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"strings"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
	"git.warky.dev/wdevs/relspecgo/pkg/readers"
)

type Reader struct {
	options *readers.ReaderOptions
}

func NewReader(options *readers.ReaderOptions) *Reader {
	return &Reader{
		options: options,
	}
}

func (r *Reader) ReadDatabase() (*models.Database, error) {
	if r.options.FilePath == "" {
		return nil, fmt.Errorf("file path is required for GraphQL reader")
	}

	content, err := os.ReadFile(r.options.FilePath)
	if err != nil {
		return nil, fmt.Errorf("failed to read file: %w", err)
	}

	return r.parseGraphQL(string(content))
}

func (r *Reader) ReadSchema() (*models.Schema, error) {
	db, err := r.ReadDatabase()
	if err != nil {
		return nil, err
	}

	if len(db.Schemas) == 0 {
		return nil, fmt.Errorf("no schemas found")
	}

	return db.Schemas[0], nil
}

func (r *Reader) ReadTable() (*models.Table, error) {
	schema, err := r.ReadSchema()
	if err != nil {
		return nil, err
	}

	if len(schema.Tables) == 0 {
		return nil, fmt.Errorf("no tables found")
	}

	return schema.Tables[0], nil
}

type parseContext struct {
	inType        bool
	inEnum        bool
	currentType   string
	typeLines     []string
	currentEnum   string
	enumLines     []string
	customScalars map[string]bool
}

func (r *Reader) parseGraphQL(content string) (*models.Database, error) {
	dbName := "database"
	if r.options.Metadata != nil {
		if name, ok := r.options.Metadata["name"].(string); ok {
			dbName = name
		}
	}

	db := models.InitDatabase(dbName)
	schema := models.InitSchema("public")

	ctx := &parseContext{
		customScalars: make(map[string]bool),
	}

	// First pass: collect custom scalars and enums
	scanner := bufio.NewScanner(strings.NewReader(content))
	scalarRegex := regexp.MustCompile(`^\s*scalar\s+(\w+)`)
	enumRegex := regexp.MustCompile(`^\s*enum\s+(\w+)\s*\{`)
	closingBraceRegex := regexp.MustCompile(`^\s*\}`)

	for scanner.Scan() {
		line := scanner.Text()
		trimmed := strings.TrimSpace(line)

		if trimmed == "" || strings.HasPrefix(trimmed, "#") {
			continue
		}

		if matches := scalarRegex.FindStringSubmatch(trimmed); matches != nil {
			ctx.customScalars[matches[1]] = true
			continue
		}

		if matches := enumRegex.FindStringSubmatch(trimmed); matches != nil {
			ctx.inEnum = true
			ctx.currentEnum = matches[1]
			ctx.enumLines = []string{}
			continue
		}

		if closingBraceRegex.MatchString(trimmed) && ctx.inEnum {
			r.parseEnum(ctx.currentEnum, ctx.enumLines, schema)
			// Add enum name to custom scalars for type detection
			ctx.customScalars[ctx.currentEnum] = true
			ctx.inEnum = false
			ctx.currentEnum = ""
			ctx.enumLines = nil
			continue
		}

		if ctx.inEnum {
			ctx.enumLines = append(ctx.enumLines, line)
		}
	}

	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("scanner error: %w", err)
	}

	// Second pass: parse types
	scanner = bufio.NewScanner(strings.NewReader(content))
	typeRegex := regexp.MustCompile(`^\s*type\s+(\w+)\s*\{`)
	ctx.inType = false
	ctx.inEnum = false

	for scanner.Scan() {
		line := scanner.Text()
		trimmed := strings.TrimSpace(line)

		if trimmed == "" || strings.HasPrefix(trimmed, "#") {
			continue
		}

		if matches := typeRegex.FindStringSubmatch(trimmed); matches != nil {
			ctx.inType = true
			ctx.currentType = matches[1]
			ctx.typeLines = []string{}
			continue
		}

		if closingBraceRegex.MatchString(trimmed) && ctx.inType {
			if err := r.parseType(ctx.currentType, ctx.typeLines, schema, ctx); err != nil {
				return nil, fmt.Errorf("failed to parse type %s: %w", ctx.currentType, err)
			}
			ctx.inType = false
			ctx.currentType = ""
			ctx.typeLines = nil
			continue
		}

		if ctx.inType {
			ctx.typeLines = append(ctx.typeLines, line)
		}
	}

	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("scanner error: %w", err)
	}

	db.Schemas = []*models.Schema{schema}

	// Third pass: detect and create relationships
	if err := r.detectAndCreateRelationships(schema, ctx); err != nil {
		return nil, fmt.Errorf("failed to create relationships: %w", err)
	}

	return db, nil
}

type fieldInfo struct {
	name          string
	typeName      string
	isArray       bool
	isNullable    bool
	innerNullable bool
}

func (r *Reader) parseType(typeName string, lines []string, schema *models.Schema, ctx *parseContext) error {
	table := models.InitTable(typeName, schema.Name)
	table.Metadata = make(map[string]any)

	// Store field info for relationship detection
	relationFields := make(map[string]*fieldInfo)

	fieldRegex := regexp.MustCompile(`^\s*(\w+)\s*:\s*(\[)?(\w+)(!)?(\])?(!)?\s*`)

	for _, line := range lines {
		trimmed := strings.TrimSpace(line)
		if trimmed == "" || strings.HasPrefix(trimmed, "#") {
			continue
		}

		matches := fieldRegex.FindStringSubmatch(trimmed)
		if matches == nil {
			continue
		}

		fieldName := matches[1]
		hasOpenBracket := matches[2] == "["
		baseType := matches[3]
		innerNonNull := matches[4] == "!"
		hasCloseBracket := matches[5] == "]"
		outerNonNull := matches[6] == "!"

		isArray := hasOpenBracket && hasCloseBracket

		// Determine if this is a scalar or a relation
		if r.isScalarType(baseType, ctx) {
			// This is a scalar field
			column := models.InitColumn(fieldName, table.Name, schema.Name)
			column.Type = r.graphQLTypeToSQL(baseType, fieldName, typeName)

			if isArray {
				// Array of scalars: use array type
				column.Type += "[]"
				column.NotNull = outerNonNull
			} else {
				column.NotNull = !isArray && innerNonNull
			}

			// Check if this is a primary key (convention: field named "id")
			if fieldName == "id" {
				column.IsPrimaryKey = true
				column.AutoIncrement = true
			}

			table.Columns[fieldName] = column
		} else {
			// This is a relation field - store for later processing
			relationFields[fieldName] = &fieldInfo{
				name:          fieldName,
				typeName:      baseType,
				isArray:       isArray,
				isNullable:    !innerNonNull && !isArray,
				innerNullable: !innerNonNull && isArray,
			}
		}
	}

	// Store relation fields in table metadata for relationship detection
	if len(relationFields) > 0 {
		table.Metadata["relationFields"] = relationFields
	}

	schema.Tables = append(schema.Tables, table)
	return nil
}

func (r *Reader) parseEnum(enumName string, lines []string, schema *models.Schema) {
	enum := &models.Enum{
		Name:   enumName,
		Schema: schema.Name,
		Values: make([]string, 0),
	}

	for _, line := range lines {
		trimmed := strings.TrimSpace(line)
		if trimmed == "" || strings.HasPrefix(trimmed, "#") {
			continue
		}
		// Enum values are simple identifiers
		enum.Values = append(enum.Values, trimmed)
	}

	schema.Enums = append(schema.Enums, enum)
}
362
pkg/readers/graphql/reader_test.go
Normal file
@@ -0,0 +1,362 @@
package graphql

import (
	"path/filepath"
	"testing"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
	"git.warky.dev/wdevs/relspecgo/pkg/readers"
)

func TestReader_ReadDatabase_Simple(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "simple.graphql"),
	}

	reader := NewReader(opts)
	db, err := reader.ReadDatabase()
	if err != nil {
		t.Fatalf("ReadDatabase() error = %v", err)
	}

	if len(db.Schemas) == 0 {
		t.Fatal("Expected at least one schema")
	}

	schema := db.Schemas[0]
	if schema.Name != "public" {
		t.Errorf("Expected schema name 'public', got '%s'", schema.Name)
	}

	if len(schema.Tables) != 1 {
		t.Fatalf("Expected 1 table, got %d", len(schema.Tables))
	}

	userTable := schema.Tables[0]
	if userTable.Name != "User" {
		t.Errorf("Expected table name 'User', got '%s'", userTable.Name)
	}

	// Verify columns
	expectedColumns := map[string]struct {
		sqlType string
		notNull bool
		isPK    bool
	}{
		"id":     {"bigint", true, true},
		"email":  {"text", true, false},
		"name":   {"text", false, false},
		"age":    {"integer", false, false},
		"active": {"boolean", true, false},
	}

	if len(userTable.Columns) != len(expectedColumns) {
		t.Fatalf("Expected %d columns, got %d", len(expectedColumns), len(userTable.Columns))
	}

	for colName, expected := range expectedColumns {
		col, exists := userTable.Columns[colName]
		if !exists {
			t.Errorf("Expected column '%s' not found", colName)
			continue
		}

		if col.Type != expected.sqlType {
			t.Errorf("Column '%s': expected type '%s', got '%s'", colName, expected.sqlType, col.Type)
		}

		if col.NotNull != expected.notNull {
			t.Errorf("Column '%s': expected NotNull=%v, got %v", colName, expected.notNull, col.NotNull)
		}

		if col.IsPrimaryKey != expected.isPK {
			t.Errorf("Column '%s': expected IsPrimaryKey=%v, got %v", colName, expected.isPK, col.IsPrimaryKey)
		}
	}
}

func TestReader_ReadDatabase_WithRelations(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "relations.graphql"),
	}

	reader := NewReader(opts)
	db, err := reader.ReadDatabase()
	if err != nil {
		t.Fatalf("ReadDatabase() error = %v", err)
	}

	schema := db.Schemas[0]

	if len(schema.Tables) != 2 {
		t.Fatalf("Expected 2 tables, got %d", len(schema.Tables))
	}

	// Find Post table (should have FK to User)
	var postTable *models.Table
	for _, table := range schema.Tables {
		if table.Name == "Post" {
			postTable = table
			break
		}
	}

	if postTable == nil {
		t.Fatal("Post table not found")
	}

	// Verify authorId FK column was created
	authorIdCol, exists := postTable.Columns["authorId"]
	if !exists {
		t.Fatal("Expected 'authorId' FK column not found in Post table")
	}

	if authorIdCol.Type != "bigint" {
		t.Errorf("Expected authorId type 'bigint', got '%s'", authorIdCol.Type)
	}

	if !authorIdCol.NotNull {
		t.Error("Expected authorId to be NOT NULL")
	}

	// Verify FK constraint
	fkConstraintFound := false
	for _, constraint := range postTable.Constraints {
		if constraint.Type == models.ForeignKeyConstraint {
			if constraint.ReferencedTable == "User" && len(constraint.Columns) > 0 && constraint.Columns[0] == "authorId" {
				fkConstraintFound = true
				if constraint.OnDelete != "CASCADE" {
					t.Errorf("Expected OnDelete CASCADE, got %s", constraint.OnDelete)
				}
				break
			}
		}
	}

	if !fkConstraintFound {
		t.Error("Foreign key constraint from Post to User not found")
	}
}

func TestReader_ReadDatabase_WithEnums(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "enums.graphql"),
	}

	reader := NewReader(opts)
	db, err := reader.ReadDatabase()
	if err != nil {
		t.Fatalf("ReadDatabase() error = %v", err)
	}

	schema := db.Schemas[0]

	if len(schema.Enums) != 1 {
		t.Fatalf("Expected 1 enum, got %d", len(schema.Enums))
	}

	roleEnum := schema.Enums[0]
	if roleEnum.Name != "Role" {
		t.Errorf("Expected enum name 'Role', got '%s'", roleEnum.Name)
	}

	expectedValues := []string{"ADMIN", "USER", "GUEST"}
	if len(roleEnum.Values) != len(expectedValues) {
		t.Fatalf("Expected %d enum values, got %d", len(expectedValues), len(roleEnum.Values))
	}

	for i, expected := range expectedValues {
		if roleEnum.Values[i] != expected {
			t.Errorf("Expected enum value '%s' at index %d, got '%s'", expected, i, roleEnum.Values[i])
		}
	}

	// Verify role column in User table
	userTable := schema.Tables[0]
	roleCol, exists := userTable.Columns["role"]
	if !exists {
		t.Fatal("Expected 'role' column not found")
	}

	if roleCol.Type != "Role" {
		t.Errorf("Expected role type 'Role', got '%s'", roleCol.Type)
	}
}

func TestReader_ReadDatabase_CustomScalars(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "custom_scalars.graphql"),
	}

	reader := NewReader(opts)
	db, err := reader.ReadDatabase()
	if err != nil {
		t.Fatalf("ReadDatabase() error = %v", err)
	}

	schema := db.Schemas[0]
	userTable := schema.Tables[0]

	// Verify custom scalar mappings
	expectedTypes := map[string]string{
		"createdAt": "timestamp",
		"metadata":  "jsonb",
		"birthDate": "date",
	}

	for colName, expectedType := range expectedTypes {
		col, exists := userTable.Columns[colName]
		if !exists {
			t.Errorf("Expected column '%s' not found", colName)
			continue
		}

		if col.Type != expectedType {
			t.Errorf("Column '%s': expected type '%s', got '%s'", colName, expectedType, col.Type)
		}
	}
}

func TestReader_ReadDatabase_UUIDMetadata(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "simple.graphql"),
		Metadata: map[string]interface{}{
			"idType": "uuid",
		},
	}

	reader := NewReader(opts)
	db, err := reader.ReadDatabase()
	if err != nil {
		t.Fatalf("ReadDatabase() error = %v", err)
	}

	schema := db.Schemas[0]
	userTable := schema.Tables[0]

	idCol, exists := userTable.Columns["id"]
	if !exists {
		t.Fatal("Expected 'id' column not found")
	}

	if idCol.Type != "uuid" {
		t.Errorf("Expected id type 'uuid' with metadata, got '%s'", idCol.Type)
	}
}

func TestReader_ReadDatabase_Complex(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "complex.graphql"),
	}

	reader := NewReader(opts)
	db, err := reader.ReadDatabase()
	if err != nil {
		t.Fatalf("ReadDatabase() error = %v", err)
	}

	schema := db.Schemas[0]

	// Should have 5 tables: User, Profile, Post, Tag, and PostTag (join table)
	expectedTableCount := 5
	if len(schema.Tables) != expectedTableCount {
		t.Fatalf("Expected %d tables, got %d", expectedTableCount, len(schema.Tables))
	}

	// Verify PostTag join table exists (many-to-many between Post and Tag)
	var joinTable *models.Table
	for _, table := range schema.Tables {
		if table.Name == "PostTag" {
			joinTable = table
			break
		}
	}

	if joinTable == nil {
		t.Fatal("Expected PostTag join table not found")
	}

	// Verify join table has both FK columns
	if _, exists := joinTable.Columns["postId"]; !exists {
		t.Error("Expected 'postId' column in PostTag join table")
	}

	if _, exists := joinTable.Columns["tagId"]; !exists {
		t.Error("Expected 'tagId' column in PostTag join table")
	}

	// Verify composite primary key
	pkFound := false
	for _, constraint := range joinTable.Constraints {
		if constraint.Type == models.PrimaryKeyConstraint {
			if len(constraint.Columns) == 2 {
				pkFound = true
			}
			break
		}
	}

	if !pkFound {
		t.Error("Expected composite primary key in PostTag join table")
	}
}

func TestReader_ReadSchema(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "simple.graphql"),
	}

	reader := NewReader(opts)
	schema, err := reader.ReadSchema()
	if err != nil {
		t.Fatalf("ReadSchema() error = %v", err)
	}

	if schema.Name != "public" {
		t.Errorf("Expected schema name 'public', got '%s'", schema.Name)
	}

	if len(schema.Tables) != 1 {
		t.Errorf("Expected 1 table, got %d", len(schema.Tables))
	}
}

func TestReader_ReadTable(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: filepath.Join("..", "..", "..", "tests", "assets", "graphql", "simple.graphql"),
	}

	reader := NewReader(opts)
	table, err := reader.ReadTable()
	if err != nil {
		t.Fatalf("ReadTable() error = %v", err)
	}

	if table.Name != "User" {
		t.Errorf("Expected table name 'User', got '%s'", table.Name)
	}
}

func TestReader_InvalidPath(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: "/nonexistent/path.graphql",
	}

	reader := NewReader(opts)
	_, err := reader.ReadDatabase()
	if err == nil {
		t.Error("Expected error for invalid path, got nil")
	}
}

func TestReader_EmptyPath(t *testing.T) {
	opts := &readers.ReaderOptions{
		FilePath: "",
	}

	reader := NewReader(opts)
	_, err := reader.ReadDatabase()
	if err == nil {
		t.Error("Expected error for empty path, got nil")
	}
}
225
pkg/readers/graphql/relationships.go
Normal file
@@ -0,0 +1,225 @@
package graphql

import (
	"fmt"
	"strings"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
)

func (r *Reader) detectAndCreateRelationships(schema *models.Schema, ctx *parseContext) error {
	// Build table lookup map
	tableMap := make(map[string]*models.Table)
	for _, table := range schema.Tables {
		tableMap[table.Name] = table
	}

	// Process each table's relation fields
	for _, table := range schema.Tables {
		relationFields, ok := table.Metadata["relationFields"].(map[string]*fieldInfo)
		if !ok || len(relationFields) == 0 {
			continue
		}

		for fieldName, fieldInfo := range relationFields {
			targetTable, exists := tableMap[fieldInfo.typeName]
			if !exists {
				// Referenced type doesn't exist - might be an interface/union, skip
				continue
			}

			if fieldInfo.isArray {
				// This is a one-to-many or many-to-many reverse side
				// Check if target table has a reverse array field
				if r.hasReverseArrayField(targetTable, table.Name) {
					// Bidirectional array = many-to-many
					// Only create join table once (lexicographically first table creates it)
					if table.Name < targetTable.Name {
						if err := r.createManyToManyJoinTable(schema, table, targetTable, fieldName, tableMap); err != nil {
							return err
						}
					}
				}
				// For one-to-many, no action needed (FK is on the other table)
			} else {
				// This is a many-to-one or one-to-one
				// Create FK column on this table
				if err := r.createForeignKeyColumn(table, targetTable, fieldName, fieldInfo.isNullable, schema); err != nil {
					return err
				}
			}
		}
	}

	// Clean up metadata
	for _, table := range schema.Tables {
		delete(table.Metadata, "relationFields")
	}

	return nil
}

func (r *Reader) hasReverseArrayField(table *models.Table, targetTypeName string) bool {
	relationFields, ok := table.Metadata["relationFields"].(map[string]*fieldInfo)
	if !ok {
		return false
	}

	for _, fieldInfo := range relationFields {
		if fieldInfo.typeName == targetTypeName && fieldInfo.isArray {
			return true
		}
	}

	return false
}

func (r *Reader) createForeignKeyColumn(fromTable, toTable *models.Table, fieldName string, nullable bool, schema *models.Schema) error {
	// Get primary key from target table
	pkCol := toTable.GetPrimaryKey()
	if pkCol == nil {
		return fmt.Errorf("target table %s has no primary key for relationship", toTable.Name)
	}

	// Create FK column name: {fieldName}Id
	fkColName := fieldName + "Id"

	// Check if column already exists (shouldn't happen but be safe)
	if _, exists := fromTable.Columns[fkColName]; exists {
		return nil
	}

	// Create FK column
	fkCol := models.InitColumn(fkColName, fromTable.Name, schema.Name)
	fkCol.Type = pkCol.Type
	fkCol.NotNull = !nullable

	fromTable.Columns[fkColName] = fkCol

	// Create FK constraint
	constraint := models.InitConstraint(
		fmt.Sprintf("fk_%s_%s", fromTable.Name, fieldName),
		models.ForeignKeyConstraint,
	)
	constraint.Schema = schema.Name
	constraint.Table = fromTable.Name
	constraint.Columns = []string{fkColName}
	constraint.ReferencedSchema = schema.Name
	constraint.ReferencedTable = toTable.Name
	constraint.ReferencedColumns = []string{pkCol.Name}
	constraint.OnDelete = "CASCADE"
	constraint.OnUpdate = "RESTRICT"

	fromTable.Constraints[constraint.Name] = constraint

	// Create relationship
	relationship := models.InitRelationship(
		fmt.Sprintf("rel_%s_%s", fromTable.Name, fieldName),
		models.OneToMany,
	)
	relationship.FromTable = fromTable.Name
	relationship.FromSchema = schema.Name
	relationship.FromColumns = []string{fkColName}
	relationship.ToTable = toTable.Name
	relationship.ToSchema = schema.Name
	relationship.ToColumns = []string{pkCol.Name}
	relationship.ForeignKey = constraint.Name

	fromTable.Relationships[relationship.Name] = relationship

	return nil
}

func (r *Reader) createManyToManyJoinTable(schema *models.Schema, table1, table2 *models.Table, fieldName string, tableMap map[string]*models.Table) error {
	// Create join table name
	joinTableName := table1.Name + table2.Name

	// Check if join table already exists
	if _, exists := tableMap[joinTableName]; exists {
		return nil
	}

	// Get primary keys
	pk1 := table1.GetPrimaryKey()
	pk2 := table2.GetPrimaryKey()

	if pk1 == nil || pk2 == nil {
		return fmt.Errorf("cannot create many-to-many: tables must have primary keys")
	}

	// Create join table
	joinTable := models.InitTable(joinTableName, schema.Name)

	// Create FK column for table1
	fkCol1Name := strings.ToLower(table1.Name) + "Id"
	fkCol1 := models.InitColumn(fkCol1Name, joinTable.Name, schema.Name)
	fkCol1.Type = pk1.Type
	fkCol1.NotNull = true
	joinTable.Columns[fkCol1Name] = fkCol1

	// Create FK column for table2
	fkCol2Name := strings.ToLower(table2.Name) + "Id"
	fkCol2 := models.InitColumn(fkCol2Name, joinTable.Name, schema.Name)
	fkCol2.Type = pk2.Type
	fkCol2.NotNull = true
	joinTable.Columns[fkCol2Name] = fkCol2

	// Create composite primary key
	pkConstraint := models.InitConstraint(
		fmt.Sprintf("pk_%s", joinTableName),
		models.PrimaryKeyConstraint,
	)
	pkConstraint.Schema = schema.Name
	pkConstraint.Table = joinTable.Name
	pkConstraint.Columns = []string{fkCol1Name, fkCol2Name}
	joinTable.Constraints[pkConstraint.Name] = pkConstraint

	// Create FK constraint to table1
	fk1 := models.InitConstraint(
		fmt.Sprintf("fk_%s_%s", joinTableName, table1.Name),
		models.ForeignKeyConstraint,
	)
	fk1.Schema = schema.Name
	fk1.Table = joinTable.Name
	fk1.Columns = []string{fkCol1Name}
	fk1.ReferencedSchema = schema.Name
	fk1.ReferencedTable = table1.Name
	fk1.ReferencedColumns = []string{pk1.Name}
	fk1.OnDelete = "CASCADE"
	fk1.OnUpdate = "RESTRICT"
	joinTable.Constraints[fk1.Name] = fk1

	// Create FK constraint to table2
	fk2 := models.InitConstraint(
		fmt.Sprintf("fk_%s_%s", joinTableName, table2.Name),
		models.ForeignKeyConstraint,
	)
	fk2.Schema = schema.Name
	fk2.Table = joinTable.Name
	fk2.Columns = []string{fkCol2Name}
	fk2.ReferencedSchema = schema.Name
	fk2.ReferencedTable = table2.Name
	fk2.ReferencedColumns = []string{pk2.Name}
	fk2.OnDelete = "CASCADE"
	fk2.OnUpdate = "RESTRICT"
	joinTable.Constraints[fk2.Name] = fk2

	// Create relationships
	rel1 := models.InitRelationship(
		fmt.Sprintf("rel_%s_%s_%s", joinTableName, table1.Name, table2.Name),
		models.ManyToMany,
	)
	rel1.FromTable = table1.Name
	rel1.FromSchema = schema.Name
	rel1.ToTable = table2.Name
	rel1.ToSchema = schema.Name
	rel1.ThroughTable = joinTableName
	rel1.ThroughSchema = schema.Name
	joinTable.Relationships[rel1.Name] = rel1

	// Add join table to schema
	schema.Tables = append(schema.Tables, joinTable)
	tableMap[joinTableName] = joinTable

	return nil
}
97
pkg/readers/graphql/type_mapping.go
Normal file
@@ -0,0 +1,97 @@
package graphql

func (r *Reader) isScalarType(typeName string, ctx *parseContext) bool {
	// Built-in GraphQL scalars
	builtInScalars := map[string]bool{
		"ID":      true,
		"String":  true,
		"Int":     true,
		"Float":   true,
		"Boolean": true,
	}

	if builtInScalars[typeName] {
		return true
	}

	// Custom scalars declared in the schema
	if ctx.customScalars[typeName] {
		return true
	}

	// Common custom scalars (even if not declared)
	commonCustomScalars := map[string]bool{
		"DateTime": true,
		"JSON":     true,
		"Date":     true,
		"Time":     true,
		"Upload":   true,
		"Decimal":  true,
	}

	return commonCustomScalars[typeName]
}

func (r *Reader) graphQLTypeToSQL(gqlType string, fieldName string, typeName string) string {
	// Check for ID type with configurable mapping
	if gqlType == "ID" {
		// Check metadata for ID type preference
		if r.options.Metadata != nil {
			// Global idType setting
			if idType, ok := r.options.Metadata["idType"].(string); ok {
				if idType == "uuid" {
					return "uuid"
				}
			}

			// Per-type ID mapping
			if typeIdMappings, ok := r.options.Metadata["typeIdMappings"].(map[string]string); ok {
				if idType, ok := typeIdMappings[typeName]; ok {
					if idType == "uuid" {
						return "uuid"
					}
				}
			}
		}

		return "bigint" // Default
	}

	// Custom scalar mappings
	if r.options.Metadata != nil {
		if customMappings, ok := r.options.Metadata["customScalarMappings"].(map[string]string); ok {
			if sqlType, ok := customMappings[gqlType]; ok {
				return sqlType
			}
		}
	}

	// Built-in custom scalar mappings
	customScalars := map[string]string{
		"DateTime": "timestamp",
		"JSON":     "jsonb",
		"Date":     "date",
		"Time":     "time",
		"Decimal":  "numeric",
		"Upload":   "bytea",
	}
	if sqlType, ok := customScalars[gqlType]; ok {
		return sqlType
	}

	// Standard scalar mappings
	typeMap := map[string]string{
		"String":  "text",
		"Int":     "integer",
		"Float":   "double precision",
		"Boolean": "boolean",
	}

	if sqlType, ok := typeMap[gqlType]; ok {
		return sqlType
	}

	// If not a known scalar, assume it's an enum or custom type
	// Return as-is (might be an enum)
	return gqlType
}
152
pkg/readers/json/README.md
Normal file
@@ -0,0 +1,152 @@
# JSON Reader

Reads database schema definitions from JSON files.

## Overview

The JSON Reader parses JSON files that define database schemas in RelSpec's canonical JSON format and converts them into RelSpec's internal database model representation.

## Features

- Reads RelSpec's standard JSON schema format
- Supports complete schema representation including:
  - Databases and schemas
  - Tables, columns, and data types
  - Constraints (PK, FK, unique, check)
  - Indexes
  - Relationships
  - Views and sequences

## Usage

### Basic Example

```go
package main

import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/readers"
	"git.warky.dev/wdevs/relspecgo/pkg/readers/json"
)

func main() {
	options := &readers.ReaderOptions{
		FilePath: "/path/to/schema.json",
	}

	reader := json.NewReader(options)
	db, err := reader.ReadDatabase()
	if err != nil {
		panic(err)
	}

	fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```

### CLI Example

```bash
# Read JSON schema and convert to GORM models
relspec --input json --in-file schema.json --output gorm --out-file models.go

# Convert JSON to PostgreSQL DDL
relspec --input json --in-file database.json --output pgsql --out-file schema.sql

# Transform JSON to YAML
relspec --input json --in-file schema.json --output yaml --out-file schema.yaml
```

## Example JSON Schema

```json
{
  "name": "myapp",
  "database_type": "postgresql",
  "schemas": [
    {
      "name": "public",
      "tables": [
        {
          "name": "users",
          "schema": "public",
          "columns": {
            "id": {
              "name": "id",
              "type": "bigint",
              "not_null": true,
              "is_primary_key": true,
              "auto_increment": true,
              "sequence": 1
            },
            "username": {
              "name": "username",
              "type": "varchar",
              "length": 50,
              "not_null": true,
              "sequence": 2
            },
            "email": {
              "name": "email",
              "type": "varchar",
              "length": 100,
              "not_null": true,
              "sequence": 3
            }
          },
          "constraints": {
            "pk_users": {
              "name": "pk_users",
              "type": "PRIMARY KEY",
              "columns": ["id"]
            },
            "uq_users_username": {
              "name": "uq_users_username",
              "type": "UNIQUE",
              "columns": ["username"]
            }
          },
          "indexes": {
            "idx_users_email": {
              "name": "idx_users_email",
              "columns": ["email"],
              "unique": false,
              "type": "btree"
            }
          }
        }
      ]
    }
  ]
}
```

## Schema Structure

The JSON format follows RelSpec's internal model structure:

- `Database` - Top-level container
  - `name` - Database name
  - `database_type` - Database system (postgresql, mysql, etc.)
  - `schemas[]` - Array of schemas

- `Schema` - Schema/namespace
  - `name` - Schema name
  - `tables[]` - Array of tables
  - `views[]` - Array of views
  - `sequences[]` - Array of sequences

- `Table` - Table definition
  - `name` - Table name
  - `columns{}` - Map of columns
  - `constraints{}` - Map of constraints
  - `indexes{}` - Map of indexes
  - `relationships{}` - Map of relationships
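
A minimal sketch of walking the parsed model after `ReadDatabase()` (column fields as used elsewhere in this repository; the actual values depend on the input file):

```go
for _, schema := range db.Schemas {
	for _, table := range schema.Tables {
		fmt.Printf("%s.%s\n", schema.Name, table.Name)
		for name, col := range table.Columns {
			fmt.Printf("  %s %s not_null=%v pk=%v\n", name, col.Type, col.NotNull, col.IsPrimaryKey)
		}
	}
}
```
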
## Notes

- This is RelSpec's native interchange format
- Preserves complete schema information
- Ideal for version control and schema documentation
- Can be used as an intermediate format for transformations
138
pkg/readers/pgsql/README.md
Normal file
@@ -0,0 +1,138 @@
# PostgreSQL Reader

Reads schema information directly from a live PostgreSQL database.

## Overview

The PostgreSQL Reader connects to a PostgreSQL database and introspects its schema, extracting complete information about tables, columns, constraints, indexes, views, and sequences.

## Features

- Direct database introspection
- Extracts complete schema information including:
  - Tables and columns
  - Primary keys, foreign keys, unique constraints, check constraints
  - Indexes
  - Views
  - Sequences
- Supports multiple schemas
- Captures constraint actions (ON DELETE, ON UPDATE)
- Derives relationships from foreign keys

## Usage

### Basic Example

```go
package main

import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/readers"
	"git.warky.dev/wdevs/relspecgo/pkg/readers/pgsql"
)

func main() {
	options := &readers.ReaderOptions{
		ConnectionString: "postgres://user:password@localhost:5432/mydb?sslmode=disable",
	}

	reader := pgsql.NewReader(options)
	db, err := reader.ReadDatabase()
	if err != nil {
		panic(err)
	}

	fmt.Printf("Database: %s\n", db.Name)
	fmt.Printf("Schemas: %d\n", len(db.Schemas))
	for _, schema := range db.Schemas {
		fmt.Printf("  Schema: %s, Tables: %d\n", schema.Name, len(schema.Tables))
	}
}
```

### CLI Example

```bash
# Inspect PostgreSQL database and export to JSON
relspec --input pgsql \
    --conn "postgres://user:password@localhost:5432/mydb" \
    --output json \
    --out-file schema.json

# Generate GORM models from PostgreSQL database
relspec --input pgsql \
    --conn "postgres://user:password@localhost:5432/mydb" \
    --output gorm \
    --out-file models.go

# Export database structure to YAML
relspec --input pgsql \
    --conn "postgres://localhost/mydb?sslmode=disable" \
    --output yaml \
    --out-file schema.yaml
```

## Connection String Format

The reader uses PostgreSQL connection strings in the format:

```
postgres://username:password@hostname:port/database?parameters
```

Examples:
```
postgres://localhost/mydb
postgres://user:pass@localhost:5432/mydb
postgres://user@localhost/mydb?sslmode=disable
postgres://user:pass@db.example.com:5432/production?sslmode=require
```

## Extracted Information

### Tables
- Table name and schema
- Comments/descriptions
- All columns with data types, nullable, defaults
- Sequences

### Columns
- Column name, data type, length/precision
- NULL/NOT NULL constraints
- Default values
- Auto-increment information
- Primary key designation

### Constraints
- Primary keys
- Foreign keys (with ON DELETE/UPDATE actions)
- Unique constraints
- Check constraints

### Indexes
- Index name and type (btree, hash, gist, gin, etc.)
- Columns in index
- Unique/non-unique
- Partial indexes

### Views
- View definitions
- Column information

### Sequences
- Sequence properties
- Associated tables
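
For example, the foreign keys captured above (including their ON DELETE actions) can be listed from the introspected model; a minimal sketch, assuming `db` was read as in the Basic Example and using the constraint fields from the `models` package as elsewhere in this repository:

```go
for _, schema := range db.Schemas {
	for _, table := range schema.Tables {
		for _, c := range table.Constraints {
			if c.Type == models.ForeignKeyConstraint {
				fmt.Printf("%s%v -> %s, ON DELETE %s\n",
					table.Name, c.Columns, c.ReferencedTable, c.OnDelete)
			}
		}
	}
}
```
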
## Notes

- Requires PostgreSQL connection permissions
- Reads all non-system schemas (excludes pg_catalog, information_schema, pg_toast)
- Captures PostgreSQL-specific data types
- Automatically maps PostgreSQL types to canonical types
- Preserves relationship metadata for downstream conversion

## Requirements

- Go library: `github.com/jackc/pgx/v5`
- Database user must have SELECT permissions on system catalogs
103
pkg/readers/prisma/README.md
Normal file
@@ -0,0 +1,103 @@
# Prisma Reader

Reads Prisma schema files and extracts database schema information.

## Overview

The Prisma Reader parses `.prisma` schema files that define database models using Prisma's schema language and converts them into RelSpec's internal database model representation.

## Features

- Parses Prisma schema syntax
- Extracts models, fields, and relationships
- Supports Prisma attributes and directives
- Handles enums and composite types

## Usage

### Basic Example

```go
package main

import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/readers"
	"git.warky.dev/wdevs/relspecgo/pkg/readers/prisma"
)

func main() {
	options := &readers.ReaderOptions{
		FilePath: "/path/to/schema.prisma",
	}

	reader := prisma.NewReader(options)
	db, err := reader.ReadDatabase()
	if err != nil {
		panic(err)
	}

	fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```

### CLI Example

```bash
# Read Prisma schema and convert to JSON
relspec --input prisma --in-file schema.prisma --output json --out-file schema.json

# Convert Prisma to GORM models
relspec --input prisma --in-file schema.prisma --output gorm --out-file models.go
```

## Example Prisma Schema

```prisma
datasource db {
  provider = "postgresql"
  url      = env("DATABASE_URL")
}

generator client {
  provider = "prisma-client-js"
}

model User {
  id        Int      @id @default(autoincrement())
  username  String   @unique @db.VarChar(50)
  email     String   @db.VarChar(100)
  createdAt DateTime @default(now()) @map("created_at")

  posts Post[]

  @@map("users")
}

model Post {
  id      Int    @id @default(autoincrement())
  userId  Int    @map("user_id")
  title   String @db.VarChar(200)
  content String @db.Text

  user User @relation(fields: [userId], references: [id], onDelete: Cascade)

  @@map("posts")
}
```

## Supported Prisma Attributes

- `@id` - Primary key
- `@unique` - Unique constraint
- `@default` - Default value
- `@map` - Column name mapping
- `@@map` - Table name mapping
- `@relation` - Relationship definition
- `@db.*` - Database-specific type annotations
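
A minimal sketch of checking how the example schema above is surfaced after reading (per the notes below, `@@map` is expected to yield the mapped table names; the exact output depends on the reader implementation):

```go
schema, err := reader.ReadSchema()
if err != nil {
	panic(err)
}
for _, table := range schema.Tables {
	fmt.Println(table.Name) // expected to reflect @@map, e.g. "users", "posts"
}
```
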
## Notes

- Extracts datasource provider information
- Supports `@@map` for custom table names
- Handles Prisma-specific types and converts them to standard SQL types
815
pkg/readers/prisma/reader.go
Normal file
@@ -0,0 +1,815 @@
package prisma

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"strings"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
	"git.warky.dev/wdevs/relspecgo/pkg/readers"
)

// Reader implements the readers.Reader interface for Prisma schema format
type Reader struct {
	options *readers.ReaderOptions
}

// NewReader creates a new Prisma reader with the given options
func NewReader(options *readers.ReaderOptions) *Reader {
	return &Reader{
		options: options,
	}
}

// ReadDatabase reads and parses Prisma schema input, returning a Database model
func (r *Reader) ReadDatabase() (*models.Database, error) {
	if r.options.FilePath == "" {
		return nil, fmt.Errorf("file path is required for Prisma reader")
	}

	content, err := os.ReadFile(r.options.FilePath)
	if err != nil {
		return nil, fmt.Errorf("failed to read file: %w", err)
	}

	return r.parsePrisma(string(content))
}

// ReadSchema reads and parses Prisma schema input, returning a Schema model
func (r *Reader) ReadSchema() (*models.Schema, error) {
	db, err := r.ReadDatabase()
	if err != nil {
		return nil, err
	}

	if len(db.Schemas) == 0 {
		return nil, fmt.Errorf("no schemas found in Prisma schema")
	}

	// Return the first schema
	return db.Schemas[0], nil
}

// ReadTable reads and parses Prisma schema input, returning a Table model
func (r *Reader) ReadTable() (*models.Table, error) {
	schema, err := r.ReadSchema()
	if err != nil {
		return nil, err
	}

	if len(schema.Tables) == 0 {
		return nil, fmt.Errorf("no tables found in Prisma schema")
	}

	// Return the first table
	return schema.Tables[0], nil
}

// parsePrisma parses Prisma schema content and returns a Database model
func (r *Reader) parsePrisma(content string) (*models.Database, error) {
	db := models.InitDatabase("database")

	if r.options.Metadata != nil {
		if name, ok := r.options.Metadata["name"].(string); ok {
			db.Name = name
		}
	}

	// Default schema for Prisma (doesn't have explicit schema concept in most cases)
	schema := models.InitSchema("public")
	schema.Enums = make([]*models.Enum, 0)

	scanner := bufio.NewScanner(strings.NewReader(content))

	// State tracking
	var currentBlock string // "datasource", "generator", "model", "enum"
	var currentTable *models.Table
	var currentEnum *models.Enum
	var blockContent []string

	// Regex patterns
	datasourceRegex := regexp.MustCompile(`^datasource\s+\w+\s*{`)
	generatorRegex := regexp.MustCompile(`^generator\s+\w+\s*{`)
	modelRegex := regexp.MustCompile(`^model\s+(\w+)\s*{`)
	enumRegex := regexp.MustCompile(`^enum\s+(\w+)\s*{`)

	for scanner.Scan() {
		line := scanner.Text()
		trimmed := strings.TrimSpace(line)

		// Skip empty lines and comments
		if trimmed == "" || strings.HasPrefix(trimmed, "//") {
			continue
		}

		// Check for block start
		if matches := datasourceRegex.FindStringSubmatch(trimmed); matches != nil {
			currentBlock = "datasource"
			blockContent = []string{}
			continue
		}

		if matches := generatorRegex.FindStringSubmatch(trimmed); matches != nil {
			currentBlock = "generator"
			blockContent = []string{}
			continue
		}

		if matches := modelRegex.FindStringSubmatch(trimmed); matches != nil {
			currentBlock = "model"
			tableName := matches[1]
			currentTable = models.InitTable(tableName, "public")
			blockContent = []string{}
			continue
		}

		if matches := enumRegex.FindStringSubmatch(trimmed); matches != nil {
			currentBlock = "enum"
			enumName := matches[1]
			currentEnum = &models.Enum{
				Name:   enumName,
				Schema: "public",
				Values: make([]string, 0),
			}
			blockContent = []string{}
			continue
		}

		// Check for block end
		if trimmed == "}" {
			switch currentBlock {
			case "datasource":
				r.parseDatasource(blockContent, db)
			case "generator":
				// We don't need to do anything with generator blocks
			case "model":
				if currentTable != nil {
					r.parseModelFields(blockContent, currentTable)
					schema.Tables = append(schema.Tables, currentTable)
					currentTable = nil
				}
			case "enum":
				if currentEnum != nil {
					schema.Enums = append(schema.Enums, currentEnum)
					currentEnum = nil
				}
			}
			currentBlock = ""
			blockContent = []string{}
			continue
		}

		// Accumulate block content
		if currentBlock != "" {
			if currentBlock == "enum" && currentEnum != nil {
				// For enums, just add the trimmed value
				if trimmed != "" {
currentEnum.Values = append(currentEnum.Values, trimmed)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
blockContent = append(blockContent, line)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Second pass: resolve relationships
|
||||||
|
r.resolveRelationships(schema)
|
||||||
|
|
||||||
|
db.Schemas = append(db.Schemas, schema)
|
||||||
|
return db, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseDatasource extracts database type from datasource block
|
||||||
|
func (r *Reader) parseDatasource(lines []string, db *models.Database) {
|
||||||
|
providerRegex := regexp.MustCompile(`provider\s*=\s*"?(\w+)"?`)
|
||||||
|
|
||||||
|
for _, line := range lines {
|
||||||
|
if matches := providerRegex.FindStringSubmatch(line); matches != nil {
|
||||||
|
provider := matches[1]
|
||||||
|
switch provider {
|
||||||
|
case "postgresql", "postgres":
|
||||||
|
db.DatabaseType = models.PostgresqlDatabaseType
|
||||||
|
case "mysql":
|
||||||
|
db.DatabaseType = "mysql"
|
||||||
|
case "sqlite":
|
||||||
|
db.DatabaseType = models.SqlLiteDatabaseType
|
||||||
|
case "sqlserver":
|
||||||
|
db.DatabaseType = models.MSSQLDatabaseType
|
||||||
|
default:
|
||||||
|
db.DatabaseType = models.PostgresqlDatabaseType
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseModelFields parses model field definitions
|
||||||
|
func (r *Reader) parseModelFields(lines []string, table *models.Table) {
|
||||||
|
fieldRegex := regexp.MustCompile(`^(\w+)\s+(\w+)(\?|\[\])?\s*(@.+)?`)
|
||||||
|
blockAttrRegex := regexp.MustCompile(`^@@(\w+)\((.*?)\)`)
|
||||||
|
|
||||||
|
for _, line := range lines {
|
||||||
|
trimmed := strings.TrimSpace(line)
|
||||||
|
|
||||||
|
// Skip empty lines and comments
|
||||||
|
if trimmed == "" || strings.HasPrefix(trimmed, "//") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for block attributes (@@id, @@unique, @@index)
|
||||||
|
if matches := blockAttrRegex.FindStringSubmatch(trimmed); matches != nil {
|
||||||
|
attrName := matches[1]
|
||||||
|
attrContent := matches[2]
|
||||||
|
r.parseBlockAttribute(attrName, attrContent, table)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse field definition
|
||||||
|
if matches := fieldRegex.FindStringSubmatch(trimmed); matches != nil {
|
||||||
|
fieldName := matches[1]
|
||||||
|
fieldType := matches[2]
|
||||||
|
modifier := matches[3] // ? or []
|
||||||
|
attributes := matches[4] // @... part
|
||||||
|
|
||||||
|
column := r.parseField(fieldName, fieldType, modifier, attributes, table)
|
||||||
|
if column != nil {
|
||||||
|
table.Columns[column.Name] = column
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseField parses a single field definition
|
||||||
|
func (r *Reader) parseField(name, fieldType, modifier, attributes string, table *models.Table) *models.Column {
|
||||||
|
// Check if this is a relation field (array or references another model)
|
||||||
|
if modifier == "[]" {
|
||||||
|
// Array field - this is a relation field, not a column
|
||||||
|
// We'll handle this in relationship resolution
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if this is a non-primitive type (relation field)
|
||||||
|
// Note: We need to allow enum types through as they're like primitives
|
||||||
|
if !r.isPrimitiveType(fieldType) && !r.isEnumType(fieldType, table) {
|
||||||
|
// This is a relation field (e.g., user User), not a scalar column
|
||||||
|
// Only process this if it has @relation attribute (which means it's the owning side with FK)
|
||||||
|
// Otherwise skip it as it's just the inverse relation field
|
||||||
|
if attributes == "" || !strings.Contains(attributes, "@relation") {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// If it has @relation, we still don't create a column for it
|
||||||
|
// The actual FK column will be in the fields: [...] part of @relation
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
column := models.InitColumn(name, table.Name, table.Schema)
|
||||||
|
|
||||||
|
// Map Prisma type to SQL type
|
||||||
|
column.Type = r.prismaTypeToSQL(fieldType)
|
||||||
|
|
||||||
|
// Handle modifiers
|
||||||
|
if modifier == "?" {
|
||||||
|
column.NotNull = false
|
||||||
|
} else {
|
||||||
|
// Default: required fields are NOT NULL
|
||||||
|
column.NotNull = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse field attributes
|
||||||
|
if attributes != "" {
|
||||||
|
r.parseFieldAttributes(attributes, column, table)
|
||||||
|
}
|
||||||
|
|
||||||
|
return column
|
||||||
|
}
|
||||||
|
|
||||||
|
// prismaTypeToSQL converts Prisma types to SQL types
|
||||||
|
func (r *Reader) prismaTypeToSQL(prismaType string) string {
|
||||||
|
typeMap := map[string]string{
|
||||||
|
"String": "text",
|
||||||
|
"Boolean": "boolean",
|
||||||
|
"Int": "integer",
|
||||||
|
"BigInt": "bigint",
|
||||||
|
"Float": "double precision",
|
||||||
|
"Decimal": "decimal",
|
||||||
|
"DateTime": "timestamp",
|
||||||
|
"Json": "jsonb",
|
||||||
|
"Bytes": "bytea",
|
||||||
|
}
|
||||||
|
|
||||||
|
if sqlType, ok := typeMap[prismaType]; ok {
|
||||||
|
return sqlType
|
||||||
|
}
|
||||||
|
|
||||||
|
// If not a built-in type, it might be an enum or model reference
|
||||||
|
// For enums, we'll use the enum name directly
|
||||||
|
return prismaType
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseFieldAttributes parses field attributes like @id, @unique, @default
|
||||||
|
func (r *Reader) parseFieldAttributes(attributes string, column *models.Column, table *models.Table) {
|
||||||
|
// @id attribute
|
||||||
|
if strings.Contains(attributes, "@id") {
|
||||||
|
column.IsPrimaryKey = true
|
||||||
|
column.NotNull = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// @unique attribute
|
||||||
|
if regexp.MustCompile(`@unique\b`).MatchString(attributes) {
|
||||||
|
uniqueConstraint := models.InitConstraint(
|
||||||
|
fmt.Sprintf("uq_%s", column.Name),
|
||||||
|
models.UniqueConstraint,
|
||||||
|
)
|
||||||
|
uniqueConstraint.Schema = table.Schema
|
||||||
|
uniqueConstraint.Table = table.Name
|
||||||
|
uniqueConstraint.Columns = []string{column.Name}
|
||||||
|
table.Constraints[uniqueConstraint.Name] = uniqueConstraint
|
||||||
|
}
|
||||||
|
|
||||||
|
// @default attribute - extract value with balanced parentheses
|
||||||
|
if strings.Contains(attributes, "@default(") {
|
||||||
|
defaultValue := r.extractDefaultValue(attributes)
|
||||||
|
if defaultValue != "" {
|
||||||
|
r.parseDefaultValue(defaultValue, column)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// @updatedAt attribute - store in comment for now
|
||||||
|
if strings.Contains(attributes, "@updatedAt") {
|
||||||
|
if column.Comment != "" {
|
||||||
|
column.Comment += "; @updatedAt"
|
||||||
|
} else {
|
||||||
|
column.Comment = "@updatedAt"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// @relation attribute - we'll handle this in relationship resolution
|
||||||
|
// For now, just note that this field is part of a relation
|
||||||
|
}
|
||||||
|
|
||||||
|
// extractDefaultValue extracts the default value from @default(...) handling nested parentheses
|
||||||
|
func (r *Reader) extractDefaultValue(attributes string) string {
|
||||||
|
idx := strings.Index(attributes, "@default(")
|
||||||
|
if idx == -1 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
start := idx + len("@default(")
|
||||||
|
depth := 1
|
||||||
|
i := start
|
||||||
|
|
||||||
|
for i < len(attributes) && depth > 0 {
|
||||||
|
switch attributes[i] {
|
||||||
|
case '(':
|
||||||
|
depth++
|
||||||
|
case ')':
|
||||||
|
depth--
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
|
||||||
|
if depth == 0 {
|
||||||
|
return attributes[start : i-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseDefaultValue parses Prisma default value expressions
|
||||||
|
func (r *Reader) parseDefaultValue(defaultExpr string, column *models.Column) {
|
||||||
|
defaultExpr = strings.TrimSpace(defaultExpr)
|
||||||
|
|
||||||
|
switch defaultExpr {
|
||||||
|
case "autoincrement()":
|
||||||
|
column.AutoIncrement = true
|
||||||
|
case "now()":
|
||||||
|
column.Default = "now()"
|
||||||
|
case "uuid()":
|
||||||
|
column.Default = "gen_random_uuid()"
|
||||||
|
case "cuid()":
|
||||||
|
// CUID is Prisma-specific, store in comment
|
||||||
|
if column.Comment != "" {
|
||||||
|
column.Comment += "; default(cuid())"
|
||||||
|
} else {
|
||||||
|
column.Comment = "default(cuid())"
|
||||||
|
}
|
||||||
|
case "true":
|
||||||
|
column.Default = true
|
||||||
|
case "false":
|
||||||
|
column.Default = false
|
||||||
|
default:
|
||||||
|
// Check if it's a string literal
|
||||||
|
if strings.HasPrefix(defaultExpr, "\"") && strings.HasSuffix(defaultExpr, "\"") {
|
||||||
|
column.Default = defaultExpr[1 : len(defaultExpr)-1]
|
||||||
|
} else if strings.HasPrefix(defaultExpr, "'") && strings.HasSuffix(defaultExpr, "'") {
|
||||||
|
column.Default = defaultExpr[1 : len(defaultExpr)-1]
|
||||||
|
} else {
|
||||||
|
// Try to parse as number or enum value
|
||||||
|
column.Default = defaultExpr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseBlockAttribute parses block-level attributes like @@id, @@unique, @@index
|
||||||
|
func (r *Reader) parseBlockAttribute(attrName, content string, table *models.Table) {
|
||||||
|
// Extract column list from brackets [col1, col2]
|
||||||
|
colListRegex := regexp.MustCompile(`\[(.*?)\]`)
|
||||||
|
matches := colListRegex.FindStringSubmatch(content)
|
||||||
|
if matches == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
columnList := strings.Split(matches[1], ",")
|
||||||
|
columns := make([]string, 0)
|
||||||
|
for _, col := range columnList {
|
||||||
|
columns = append(columns, strings.TrimSpace(col))
|
||||||
|
}
|
||||||
|
|
||||||
|
switch attrName {
|
||||||
|
case "id":
|
||||||
|
// Composite primary key
|
||||||
|
for _, colName := range columns {
|
||||||
|
if col, exists := table.Columns[colName]; exists {
|
||||||
|
col.IsPrimaryKey = true
|
||||||
|
col.NotNull = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Also create a PK constraint
|
||||||
|
pkConstraint := models.InitConstraint(
|
||||||
|
fmt.Sprintf("pk_%s", table.Name),
|
||||||
|
models.PrimaryKeyConstraint,
|
||||||
|
)
|
||||||
|
pkConstraint.Schema = table.Schema
|
||||||
|
pkConstraint.Table = table.Name
|
||||||
|
pkConstraint.Columns = columns
|
||||||
|
table.Constraints[pkConstraint.Name] = pkConstraint
|
||||||
|
|
||||||
|
case "unique":
|
||||||
|
// Multi-column unique constraint
|
||||||
|
uniqueConstraint := models.InitConstraint(
|
||||||
|
fmt.Sprintf("uq_%s_%s", table.Name, strings.Join(columns, "_")),
|
||||||
|
models.UniqueConstraint,
|
||||||
|
)
|
||||||
|
uniqueConstraint.Schema = table.Schema
|
||||||
|
uniqueConstraint.Table = table.Name
|
||||||
|
uniqueConstraint.Columns = columns
|
||||||
|
table.Constraints[uniqueConstraint.Name] = uniqueConstraint
|
||||||
|
|
||||||
|
case "index":
|
||||||
|
// Index
|
||||||
|
index := models.InitIndex(
|
||||||
|
fmt.Sprintf("idx_%s_%s", table.Name, strings.Join(columns, "_")),
|
||||||
|
table.Name,
|
||||||
|
table.Schema,
|
||||||
|
)
|
||||||
|
index.Columns = columns
|
||||||
|
table.Indexes[index.Name] = index
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// relationField stores information about a relation field for second-pass processing
|
||||||
|
type relationField struct {
|
||||||
|
tableName string
|
||||||
|
fieldName string
|
||||||
|
relatedModel string
|
||||||
|
isArray bool
|
||||||
|
relationAttr string
|
||||||
|
}
|
||||||
|
|
||||||
|
// resolveRelationships performs a second pass to resolve @relation attributes
|
||||||
|
func (r *Reader) resolveRelationships(schema *models.Schema) {
|
||||||
|
// Build a map of table names for quick lookup
|
||||||
|
tableMap := make(map[string]*models.Table)
|
||||||
|
for _, table := range schema.Tables {
|
||||||
|
tableMap[table.Name] = table
|
||||||
|
}
|
||||||
|
|
||||||
|
// First, we need to re-parse to find relation fields
|
||||||
|
// We'll re-read the file to extract relation information
|
||||||
|
if r.options.FilePath == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
content, err := os.ReadFile(r.options.FilePath)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
relations := r.extractRelationFields(string(content))
|
||||||
|
|
||||||
|
// Process explicit @relation attributes to create FK constraints
|
||||||
|
for _, rel := range relations {
|
||||||
|
if rel.relationAttr != "" {
|
||||||
|
r.createConstraintFromRelation(rel, tableMap, schema)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Detect implicit many-to-many relationships
|
||||||
|
r.detectImplicitManyToMany(relations, tableMap, schema)
|
||||||
|
}
|
||||||
|
|
||||||
|
// extractRelationFields extracts relation field information from the schema
|
||||||
|
func (r *Reader) extractRelationFields(content string) []relationField {
|
||||||
|
relations := make([]relationField, 0)
|
||||||
|
scanner := bufio.NewScanner(strings.NewReader(content))
|
||||||
|
|
||||||
|
modelRegex := regexp.MustCompile(`^model\s+(\w+)\s*{`)
|
||||||
|
fieldRegex := regexp.MustCompile(`^(\w+)\s+(\w+)(\?|\[\])?\s*(@.+)?`)
|
||||||
|
|
||||||
|
var currentModel string
|
||||||
|
inModel := false
|
||||||
|
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := scanner.Text()
|
||||||
|
trimmed := strings.TrimSpace(line)
|
||||||
|
|
||||||
|
if trimmed == "" || strings.HasPrefix(trimmed, "//") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if matches := modelRegex.FindStringSubmatch(trimmed); matches != nil {
|
||||||
|
currentModel = matches[1]
|
||||||
|
inModel = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if trimmed == "}" {
|
||||||
|
inModel = false
|
||||||
|
currentModel = ""
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if inModel && currentModel != "" {
|
||||||
|
if matches := fieldRegex.FindStringSubmatch(trimmed); matches != nil {
|
||||||
|
fieldName := matches[1]
|
||||||
|
fieldType := matches[2]
|
||||||
|
modifier := matches[3]
|
||||||
|
attributes := matches[4]
|
||||||
|
|
||||||
|
// Check if this is a relation field (references another model or is an array)
|
||||||
|
isPotentialRelation := modifier == "[]" || !r.isPrimitiveType(fieldType)
|
||||||
|
|
||||||
|
if isPotentialRelation {
|
||||||
|
rel := relationField{
|
||||||
|
tableName: currentModel,
|
||||||
|
fieldName: fieldName,
|
||||||
|
relatedModel: fieldType,
|
||||||
|
isArray: modifier == "[]",
|
||||||
|
relationAttr: attributes,
|
||||||
|
}
|
||||||
|
relations = append(relations, rel)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return relations
|
||||||
|
}
|
||||||
|
|
||||||
|
// isPrimitiveType checks if a type is a Prisma primitive type
|
||||||
|
func (r *Reader) isPrimitiveType(typeName string) bool {
|
||||||
|
primitives := []string{"String", "Boolean", "Int", "BigInt", "Float", "Decimal", "DateTime", "Json", "Bytes"}
|
||||||
|
for _, p := range primitives {
|
||||||
|
if typeName == p {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// isEnumType checks if a type name might be an enum
|
||||||
|
// Note: We can't definitively check against schema.Enums at parse time
|
||||||
|
// because enums might be defined after the model, so we just check
|
||||||
|
// if it starts with uppercase (Prisma convention for enums)
|
||||||
|
func (r *Reader) isEnumType(typeName string, table *models.Table) bool {
|
||||||
|
// Simple heuristic: enum types start with uppercase letter
|
||||||
|
// and are not known model names (though we can't check that yet)
|
||||||
|
if len(typeName) > 0 && typeName[0] >= 'A' && typeName[0] <= 'Z' {
|
||||||
|
// Additional check: primitive types are already handled above
|
||||||
|
// So if it's uppercase and not primitive, it's likely an enum or model
|
||||||
|
// We'll assume it's an enum if it's a single word
|
||||||
|
return !strings.Contains(typeName, "_")
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// createConstraintFromRelation creates a FK constraint from a @relation attribute
|
||||||
|
func (r *Reader) createConstraintFromRelation(rel relationField, tableMap map[string]*models.Table, schema *models.Schema) {
|
||||||
|
// Skip array fields (they are the inverse side of the relation)
|
||||||
|
if rel.isArray {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if rel.relationAttr == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse @relation attribute
|
||||||
|
relationRegex := regexp.MustCompile(`@relation\((.*?)\)`)
|
||||||
|
matches := relationRegex.FindStringSubmatch(rel.relationAttr)
|
||||||
|
if matches == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
relationContent := matches[1]
|
||||||
|
|
||||||
|
// Extract fields and references
|
||||||
|
fieldsRegex := regexp.MustCompile(`fields:\s*\[(.*?)\]`)
|
||||||
|
referencesRegex := regexp.MustCompile(`references:\s*\[(.*?)\]`)
|
||||||
|
nameRegex := regexp.MustCompile(`name:\s*"([^"]+)"`)
|
||||||
|
onDeleteRegex := regexp.MustCompile(`onDelete:\s*(\w+)`)
|
||||||
|
onUpdateRegex := regexp.MustCompile(`onUpdate:\s*(\w+)`)
|
||||||
|
|
||||||
|
fieldsMatch := fieldsRegex.FindStringSubmatch(relationContent)
|
||||||
|
referencesMatch := referencesRegex.FindStringSubmatch(relationContent)
|
||||||
|
|
||||||
|
if fieldsMatch == nil || referencesMatch == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse field and reference column lists
|
||||||
|
fieldCols := r.parseColumnList(fieldsMatch[1])
|
||||||
|
refCols := r.parseColumnList(referencesMatch[1])
|
||||||
|
|
||||||
|
if len(fieldCols) == 0 || len(refCols) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create FK constraint
|
||||||
|
constraintName := fmt.Sprintf("fk_%s_%s", rel.tableName, fieldCols[0])
|
||||||
|
|
||||||
|
// Check for custom name
|
||||||
|
if nameMatch := nameRegex.FindStringSubmatch(relationContent); nameMatch != nil {
|
||||||
|
constraintName = nameMatch[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
constraint := models.InitConstraint(constraintName, models.ForeignKeyConstraint)
|
||||||
|
constraint.Schema = "public"
|
||||||
|
constraint.Table = rel.tableName
|
||||||
|
constraint.Columns = fieldCols
|
||||||
|
constraint.ReferencedSchema = "public"
|
||||||
|
constraint.ReferencedTable = rel.relatedModel
|
||||||
|
constraint.ReferencedColumns = refCols
|
||||||
|
|
||||||
|
// Parse referential actions
|
||||||
|
if onDeleteMatch := onDeleteRegex.FindStringSubmatch(relationContent); onDeleteMatch != nil {
|
||||||
|
constraint.OnDelete = onDeleteMatch[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
if onUpdateMatch := onUpdateRegex.FindStringSubmatch(relationContent); onUpdateMatch != nil {
|
||||||
|
constraint.OnUpdate = onUpdateMatch[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add constraint to table
|
||||||
|
if table, exists := tableMap[rel.tableName]; exists {
|
||||||
|
table.Constraints[constraint.Name] = constraint
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseColumnList parses a comma-separated list of column names
|
||||||
|
func (r *Reader) parseColumnList(list string) []string {
|
||||||
|
parts := strings.Split(list, ",")
|
||||||
|
result := make([]string, 0)
|
||||||
|
for _, part := range parts {
|
||||||
|
trimmed := strings.TrimSpace(part)
|
||||||
|
if trimmed != "" {
|
||||||
|
result = append(result, trimmed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// detectImplicitManyToMany detects implicit M2M relationships and creates join tables
|
||||||
|
func (r *Reader) detectImplicitManyToMany(relations []relationField, tableMap map[string]*models.Table, schema *models.Schema) {
|
||||||
|
// Group relations by model pairs
|
||||||
|
type modelPair struct {
|
||||||
|
model1 string
|
||||||
|
model2 string
|
||||||
|
}
|
||||||
|
|
||||||
|
pairMap := make(map[modelPair][]relationField)
|
||||||
|
|
||||||
|
for _, rel := range relations {
|
||||||
|
if !rel.isArray || rel.relationAttr != "" {
|
||||||
|
// Skip non-array fields and explicit relations
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a normalized pair (alphabetically sorted to avoid duplicates)
|
||||||
|
pair := modelPair{}
|
||||||
|
if rel.tableName < rel.relatedModel {
|
||||||
|
pair.model1 = rel.tableName
|
||||||
|
pair.model2 = rel.relatedModel
|
||||||
|
} else {
|
||||||
|
pair.model1 = rel.relatedModel
|
||||||
|
pair.model2 = rel.tableName
|
||||||
|
}
|
||||||
|
|
||||||
|
pairMap[pair] = append(pairMap[pair], rel)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for pairs with arrays on both sides (implicit M2M)
|
||||||
|
for pair, rels := range pairMap {
|
||||||
|
if len(rels) >= 2 {
|
||||||
|
// This is an implicit many-to-many relationship
|
||||||
|
r.createImplicitJoinTable(pair.model1, pair.model2, tableMap, schema)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// createImplicitJoinTable creates a virtual join table for implicit M2M relations
|
||||||
|
func (r *Reader) createImplicitJoinTable(model1, model2 string, tableMap map[string]*models.Table, schema *models.Schema) {
|
||||||
|
// Prisma naming convention: _Model1ToModel2 (alphabetically sorted)
|
||||||
|
joinTableName := fmt.Sprintf("_%sTo%s", model1, model2)
|
||||||
|
|
||||||
|
// Check if join table already exists
|
||||||
|
if _, exists := tableMap[joinTableName]; exists {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create join table
|
||||||
|
joinTable := models.InitTable(joinTableName, "public")
|
||||||
|
|
||||||
|
// Get primary keys from both tables
|
||||||
|
pk1 := r.getPrimaryKeyColumn(tableMap[model1])
|
||||||
|
pk2 := r.getPrimaryKeyColumn(tableMap[model2])
|
||||||
|
|
||||||
|
if pk1 == nil || pk2 == nil {
|
||||||
|
return // Can't create join table without PKs
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create FK columns in join table
|
||||||
|
fkCol1Name := fmt.Sprintf("%sId", model1)
|
||||||
|
fkCol1 := models.InitColumn(fkCol1Name, joinTableName, "public")
|
||||||
|
fkCol1.Type = pk1.Type
|
||||||
|
fkCol1.NotNull = true
|
||||||
|
joinTable.Columns[fkCol1Name] = fkCol1
|
||||||
|
|
||||||
|
fkCol2Name := fmt.Sprintf("%sId", model2)
|
||||||
|
fkCol2 := models.InitColumn(fkCol2Name, joinTableName, "public")
|
||||||
|
fkCol2.Type = pk2.Type
|
||||||
|
fkCol2.NotNull = true
|
||||||
|
joinTable.Columns[fkCol2Name] = fkCol2
|
||||||
|
|
||||||
|
// Create composite primary key
|
||||||
|
pkConstraint := models.InitConstraint(
|
||||||
|
fmt.Sprintf("pk_%s", joinTableName),
|
||||||
|
models.PrimaryKeyConstraint,
|
||||||
|
)
|
||||||
|
pkConstraint.Schema = "public"
|
||||||
|
pkConstraint.Table = joinTableName
|
||||||
|
pkConstraint.Columns = []string{fkCol1Name, fkCol2Name}
|
||||||
|
joinTable.Constraints[pkConstraint.Name] = pkConstraint
|
||||||
|
|
||||||
|
// Mark columns as PK
|
||||||
|
fkCol1.IsPrimaryKey = true
|
||||||
|
fkCol2.IsPrimaryKey = true
|
||||||
|
|
||||||
|
// Create FK constraints
|
||||||
|
fk1 := models.InitConstraint(
|
||||||
|
fmt.Sprintf("fk_%s_%s", joinTableName, model1),
|
||||||
|
models.ForeignKeyConstraint,
|
||||||
|
)
|
||||||
|
fk1.Schema = "public"
|
||||||
|
fk1.Table = joinTableName
|
||||||
|
fk1.Columns = []string{fkCol1Name}
|
||||||
|
fk1.ReferencedSchema = "public"
|
||||||
|
fk1.ReferencedTable = model1
|
||||||
|
fk1.ReferencedColumns = []string{pk1.Name}
|
||||||
|
fk1.OnDelete = "Cascade"
|
||||||
|
joinTable.Constraints[fk1.Name] = fk1
|
||||||
|
|
||||||
|
fk2 := models.InitConstraint(
|
||||||
|
fmt.Sprintf("fk_%s_%s", joinTableName, model2),
|
||||||
|
models.ForeignKeyConstraint,
|
||||||
|
)
|
||||||
|
fk2.Schema = "public"
|
||||||
|
fk2.Table = joinTableName
|
||||||
|
fk2.Columns = []string{fkCol2Name}
|
||||||
|
fk2.ReferencedSchema = "public"
|
||||||
|
fk2.ReferencedTable = model2
|
||||||
|
fk2.ReferencedColumns = []string{pk2.Name}
|
||||||
|
fk2.OnDelete = "Cascade"
|
||||||
|
joinTable.Constraints[fk2.Name] = fk2
|
||||||
|
|
||||||
|
// Add join table to schema
|
||||||
|
schema.Tables = append(schema.Tables, joinTable)
|
||||||
|
tableMap[joinTableName] = joinTable
|
||||||
|
}
|
||||||
|
|
||||||
|
// getPrimaryKeyColumn returns the primary key column of a table
|
||||||
|
func (r *Reader) getPrimaryKeyColumn(table *models.Table) *models.Column {
|
||||||
|
if table == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, col := range table.Columns {
|
||||||
|
if col.IsPrimaryKey {
|
||||||
|
return col
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
122
pkg/readers/typeorm/README.md
Normal file
@@ -0,0 +1,122 @@
# TypeORM Reader

Reads TypeScript files containing TypeORM entity definitions and extracts database schema information.

## Overview

The TypeORM Reader parses TypeScript source files that define TypeORM entities (classes with TypeORM decorators) and converts them into RelSpec's internal database model representation.

## Features

- Parses TypeORM decorators and entity definitions
- Extracts table, column, and relationship information
- Supports various TypeORM column types and options
- Handles constraints, indexes, and relationships

## Usage

### Basic Example

```go
package main

import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/readers"
	"git.warky.dev/wdevs/relspecgo/pkg/readers/typeorm"
)

func main() {
	options := &readers.ReaderOptions{
		FilePath: "/path/to/entities/",
	}

	reader := typeorm.NewReader(options)
	db, err := reader.ReadDatabase()
	if err != nil {
		panic(err)
	}

	fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```

### CLI Example

```bash
# Read TypeORM entities and convert to JSON
relspec --input typeorm --in-file entities/ --output json --out-file schema.json

# Convert TypeORM to GORM models
relspec --input typeorm --in-file User.ts --output gorm --out-file models.go
```

## Example TypeORM Entity

```typescript
import {
  Entity,
  PrimaryGeneratedColumn,
  Column,
  CreateDateColumn,
  OneToMany,
  ManyToOne,
  JoinColumn,
} from 'typeorm';
import { Post } from './Post';

@Entity('users')
export class User {
  @PrimaryGeneratedColumn('increment')
  id: number;

  @Column({ type: 'varchar', length: 50, unique: true })
  username: string;

  @Column({ type: 'varchar', length: 100 })
  email: string;

  @CreateDateColumn({ name: 'created_at' })
  createdAt: Date;

  @OneToMany(() => Post, (post) => post.user)
  posts: Post[];
}

@Entity('posts')
export class Post {
  @PrimaryGeneratedColumn('increment')
  id: number;

  @Column({ name: 'user_id' })
  userId: number;

  @Column({ type: 'varchar', length: 200 })
  title: string;

  @Column({ type: 'text' })
  content: string;

  @ManyToOne(() => User, (user) => user.posts, { onDelete: 'CASCADE' })
  @JoinColumn({ name: 'user_id' })
  user: User;
}
```

## Supported TypeORM Decorators

- `@Entity()` - Entity/table definition
- `@PrimaryGeneratedColumn()` - Auto-increment primary key
- `@PrimaryColumn()` - Primary key
- `@Column()` - Column definition
- `@CreateDateColumn()` - Auto-set creation timestamp
- `@UpdateDateColumn()` - Auto-update timestamp
- `@OneToMany()` - One-to-many relationship
- `@ManyToOne()` - Many-to-one relationship
- `@JoinColumn()` - Foreign key column
- `@Index()` - Index definition
- `@Unique()` - Unique constraint

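As a rough illustration of how these decorators surface in the parsed model, the sketch below reads a single entity file and inspects the resulting table. The `User.ts` path refers to the example above, and the printed fields mirror what `reader.go` (below) produces; treat it as a sketch rather than captured output.

```go
package main

import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/readers"
	"git.warky.dev/wdevs/relspecgo/pkg/readers/typeorm"
)

func main() {
	// Parse the example entity file shown above (the path is illustrative).
	reader := typeorm.NewReader(&readers.ReaderOptions{FilePath: "User.ts"})
	table, err := reader.ReadTable()
	if err != nil {
		panic(err)
	}

	// @PrimaryGeneratedColumn('increment') becomes an auto-increment primary key column.
	if id, ok := table.Columns["id"]; ok {
		fmt.Println("id:", id.Type, "pk:", id.IsPrimaryKey, "autoincrement:", id.AutoIncrement)
	}

	// @Column({ unique: true }) adds a uq_<column> unique constraint on the table.
	for name, c := range table.Constraints {
		fmt.Println("constraint:", name, c.Columns)
	}
}
```
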
## Notes

- Schema name can be specified in `@Entity()` decorator
- Supports both JavaScript and TypeScript entity files
- Relationship metadata is extracted from decorators

785
pkg/readers/typeorm/reader.go
Normal file
@@ -0,0 +1,785 @@
|
|||||||
|
package typeorm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reader implements the readers.Reader interface for TypeORM entity files
|
||||||
|
type Reader struct {
|
||||||
|
options *readers.ReaderOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReader creates a new TypeORM reader with the given options
|
||||||
|
func NewReader(options *readers.ReaderOptions) *Reader {
|
||||||
|
return &Reader{
|
||||||
|
options: options,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadDatabase reads and parses TypeORM entity files, returning a Database model
|
||||||
|
func (r *Reader) ReadDatabase() (*models.Database, error) {
|
||||||
|
if r.options.FilePath == "" {
|
||||||
|
return nil, fmt.Errorf("file path is required for TypeORM reader")
|
||||||
|
}
|
||||||
|
|
||||||
|
content, err := os.ReadFile(r.options.FilePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.parseTypeORM(string(content))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadSchema reads and parses TypeORM entity files, returning a Schema model
|
||||||
|
func (r *Reader) ReadSchema() (*models.Schema, error) {
|
||||||
|
db, err := r.ReadDatabase()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(db.Schemas) == 0 {
|
||||||
|
return nil, fmt.Errorf("no schemas found in TypeORM entities")
|
||||||
|
}
|
||||||
|
|
||||||
|
return db.Schemas[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadTable reads and parses TypeORM entity files, returning a Table model
|
||||||
|
func (r *Reader) ReadTable() (*models.Table, error) {
|
||||||
|
schema, err := r.ReadSchema()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(schema.Tables) == 0 {
|
||||||
|
return nil, fmt.Errorf("no tables found in TypeORM entities")
|
||||||
|
}
|
||||||
|
|
||||||
|
return schema.Tables[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// entityInfo stores information about an entity during parsing
|
||||||
|
type entityInfo struct {
|
||||||
|
name string
|
||||||
|
fields []fieldInfo
|
||||||
|
decorators []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// fieldInfo stores information about a field during parsing
|
||||||
|
type fieldInfo struct {
|
||||||
|
name string
|
||||||
|
typeName string
|
||||||
|
decorators []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseTypeORM parses TypeORM entity content and returns a Database model
|
||||||
|
func (r *Reader) parseTypeORM(content string) (*models.Database, error) {
|
||||||
|
db := models.InitDatabase("database")
|
||||||
|
schema := models.InitSchema("public")
|
||||||
|
|
||||||
|
// Parse entities
|
||||||
|
entities := r.extractEntities(content)
|
||||||
|
|
||||||
|
// Convert entities to tables and views
|
||||||
|
tableMap := make(map[string]*models.Table)
|
||||||
|
for _, entity := range entities {
|
||||||
|
// Check if this is a view
|
||||||
|
isView := false
|
||||||
|
for _, decorator := range entity.decorators {
|
||||||
|
if strings.HasPrefix(decorator, "@ViewEntity") {
|
||||||
|
isView = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if isView {
|
||||||
|
view := r.entityToView(entity)
|
||||||
|
schema.Views = append(schema.Views, view)
|
||||||
|
} else {
|
||||||
|
table := r.entityToTable(entity)
|
||||||
|
schema.Tables = append(schema.Tables, table)
|
||||||
|
tableMap[table.Name] = table
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Second pass: resolve relationships
|
||||||
|
r.resolveRelationships(entities, tableMap, schema)
|
||||||
|
|
||||||
|
db.Schemas = append(db.Schemas, schema)
|
||||||
|
return db, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// extractEntities extracts entity and view definitions from TypeORM content
|
||||||
|
func (r *Reader) extractEntities(content string) []entityInfo {
|
||||||
|
entities := make([]entityInfo, 0)
|
||||||
|
|
||||||
|
// First, extract decorators properly (handling multi-line)
|
||||||
|
content = r.normalizeDecorators(content)
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(strings.NewReader(content))
|
||||||
|
|
||||||
|
entityRegex := regexp.MustCompile(`^export\s+class\s+(\w+)`)
|
||||||
|
decoratorRegex := regexp.MustCompile(`^\s*@(\w+)(\([^)]*\))?`)
|
||||||
|
fieldRegex := regexp.MustCompile(`^\s*(\w+):\s*([^;]+);`)
|
||||||
|
|
||||||
|
var currentEntity *entityInfo
|
||||||
|
var pendingDecorators []string
|
||||||
|
inClass := false
|
||||||
|
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := scanner.Text()
|
||||||
|
trimmed := strings.TrimSpace(line)
|
||||||
|
|
||||||
|
// Skip empty lines and comments
|
||||||
|
if trimmed == "" || strings.HasPrefix(trimmed, "//") || strings.HasPrefix(trimmed, "import ") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for decorator
|
||||||
|
if matches := decoratorRegex.FindStringSubmatch(trimmed); matches != nil {
|
||||||
|
decorator := matches[0]
|
||||||
|
pendingDecorators = append(pendingDecorators, decorator)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for entity/view class
|
||||||
|
if matches := entityRegex.FindStringSubmatch(trimmed); matches != nil {
|
||||||
|
// Save previous entity if exists
|
||||||
|
if currentEntity != nil {
|
||||||
|
entities = append(entities, *currentEntity)
|
||||||
|
}
|
||||||
|
|
||||||
|
currentEntity = &entityInfo{
|
||||||
|
name: matches[1],
|
||||||
|
fields: make([]fieldInfo, 0),
|
||||||
|
decorators: pendingDecorators,
|
||||||
|
}
|
||||||
|
pendingDecorators = []string{}
|
||||||
|
inClass = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for class end
|
||||||
|
if inClass && trimmed == "}" {
|
||||||
|
if currentEntity != nil {
|
||||||
|
entities = append(entities, *currentEntity)
|
||||||
|
currentEntity = nil
|
||||||
|
}
|
||||||
|
inClass = false
|
||||||
|
pendingDecorators = []string{}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for field definition
|
||||||
|
if inClass && currentEntity != nil {
|
||||||
|
if matches := fieldRegex.FindStringSubmatch(trimmed); matches != nil {
|
||||||
|
fieldName := matches[1]
|
||||||
|
fieldType := strings.TrimSpace(matches[2])
|
||||||
|
|
||||||
|
field := fieldInfo{
|
||||||
|
name: fieldName,
|
||||||
|
typeName: fieldType,
|
||||||
|
decorators: pendingDecorators,
|
||||||
|
}
|
||||||
|
currentEntity.fields = append(currentEntity.fields, field)
|
||||||
|
pendingDecorators = []string{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save last entity
|
||||||
|
if currentEntity != nil {
|
||||||
|
entities = append(entities, *currentEntity)
|
||||||
|
}
|
||||||
|
|
||||||
|
return entities
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeDecorators combines multi-line decorators into single lines
|
||||||
|
func (r *Reader) normalizeDecorators(content string) string {
|
||||||
|
// Replace multi-line decorators with single-line versions
|
||||||
|
// Match @Decorator({ ... }) across multiple lines
|
||||||
|
decoratorRegex := regexp.MustCompile(`@(\w+)\s*\(\s*\{([^}]*)\}\s*\)`)
|
||||||
|
|
||||||
|
return decoratorRegex.ReplaceAllStringFunc(content, func(match string) string {
|
||||||
|
// Remove newlines and extra spaces from decorator
|
||||||
|
match = strings.ReplaceAll(match, "\n", " ")
|
||||||
|
match = strings.ReplaceAll(match, "\r", " ")
|
||||||
|
// Normalize multiple spaces
|
||||||
|
spaceRegex := regexp.MustCompile(`\s+`)
|
||||||
|
match = spaceRegex.ReplaceAllString(match, " ")
|
||||||
|
return match
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// entityToView converts a view entity to a view
|
||||||
|
func (r *Reader) entityToView(entity entityInfo) *models.View {
|
||||||
|
// Parse @ViewEntity decorator options
|
||||||
|
viewName := entity.name
|
||||||
|
schemaName := "public"
|
||||||
|
var expression string
|
||||||
|
|
||||||
|
for _, decorator := range entity.decorators {
|
||||||
|
if strings.HasPrefix(decorator, "@ViewEntity") {
|
||||||
|
// Extract options from @ViewEntity({ ... })
|
||||||
|
options := r.parseViewEntityOptions(decorator)
|
||||||
|
|
||||||
|
// Check for custom view name
|
||||||
|
if name, ok := options["name"]; ok {
|
||||||
|
viewName = name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for schema
|
||||||
|
if schema, ok := options["schema"]; ok {
|
||||||
|
schemaName = schema
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for expression (SQL definition)
|
||||||
|
if expr, ok := options["expression"]; ok {
|
||||||
|
expression = expr
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
view := models.InitView(viewName, schemaName)
|
||||||
|
view.Definition = expression
|
||||||
|
|
||||||
|
// Add columns from fields (if any are defined in the view class)
|
||||||
|
for _, field := range entity.fields {
|
||||||
|
column := models.InitColumn(field.name, viewName, schemaName)
|
||||||
|
column.Type = r.typeScriptTypeToSQL(field.typeName)
|
||||||
|
view.Columns[column.Name] = column
|
||||||
|
}
|
||||||
|
|
||||||
|
return view
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseViewEntityOptions parses @ViewEntity decorator options
|
||||||
|
func (r *Reader) parseViewEntityOptions(decorator string) map[string]string {
|
||||||
|
options := make(map[string]string)
|
||||||
|
|
||||||
|
// Extract content between parentheses
|
||||||
|
start := strings.Index(decorator, "(")
|
||||||
|
end := strings.LastIndex(decorator, ")")
|
||||||
|
|
||||||
|
if start == -1 || end == -1 || start >= end {
|
||||||
|
return options
|
||||||
|
}
|
||||||
|
|
||||||
|
content := decorator[start+1 : end]
|
||||||
|
|
||||||
|
// Skip if empty @ViewEntity()
|
||||||
|
if strings.TrimSpace(content) == "" {
|
||||||
|
return options
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse name: "value"
|
||||||
|
nameRegex := regexp.MustCompile(`name:\s*["']([^"']+)["']`)
|
||||||
|
if matches := nameRegex.FindStringSubmatch(content); matches != nil {
|
||||||
|
options["name"] = matches[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse schema: "value"
|
||||||
|
schemaRegex := regexp.MustCompile(`schema:\s*["']([^"']+)["']`)
|
||||||
|
if matches := schemaRegex.FindStringSubmatch(content); matches != nil {
|
||||||
|
options["schema"] = matches[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse expression: ` ... ` (can be multi-line, captured as single line after normalization)
|
||||||
|
// Look for expression followed by backtick or quote
|
||||||
|
expressionRegex := regexp.MustCompile(`expression:\s*` + "`" + `([^` + "`" + `]+)` + "`")
|
||||||
|
if matches := expressionRegex.FindStringSubmatch(content); matches != nil {
|
||||||
|
options["expression"] = strings.TrimSpace(matches[1])
|
||||||
|
} else {
|
||||||
|
// Try with regular quotes
|
||||||
|
expressionRegex = regexp.MustCompile(`expression:\s*["']([^"']+)["']`)
|
||||||
|
if matches := expressionRegex.FindStringSubmatch(content); matches != nil {
|
||||||
|
options["expression"] = strings.TrimSpace(matches[1])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return options
|
||||||
|
}
|
||||||
|
|
||||||
|
// entityToTable converts an entity to a table
|
||||||
|
func (r *Reader) entityToTable(entity entityInfo) *models.Table {
|
||||||
|
// Parse @Entity decorator options
|
||||||
|
tableName := entity.name
|
||||||
|
schemaName := "public"
|
||||||
|
var entityOptions map[string]string
|
||||||
|
|
||||||
|
for _, decorator := range entity.decorators {
|
||||||
|
if strings.HasPrefix(decorator, "@Entity") {
|
||||||
|
// Extract options from @Entity({ ... })
|
||||||
|
entityOptions = r.parseEntityOptions(decorator)
|
||||||
|
|
||||||
|
// Check for custom table name
|
||||||
|
if name, ok := entityOptions["name"]; ok {
|
||||||
|
tableName = name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for schema
|
||||||
|
if schema, ok := entityOptions["schema"]; ok {
|
||||||
|
schemaName = schema
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
table := models.InitTable(tableName, schemaName)
|
||||||
|
|
||||||
|
// Store additional metadata from @Entity options
|
||||||
|
if entityOptions != nil {
|
||||||
|
// Store database name in metadata
|
||||||
|
if database, ok := entityOptions["database"]; ok {
|
||||||
|
if table.Metadata == nil {
|
||||||
|
table.Metadata = make(map[string]any)
|
||||||
|
}
|
||||||
|
table.Metadata["database"] = database
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store engine in metadata
|
||||||
|
if engine, ok := entityOptions["engine"]; ok {
|
||||||
|
if table.Metadata == nil {
|
||||||
|
table.Metadata = make(map[string]any)
|
||||||
|
}
|
||||||
|
table.Metadata["engine"] = engine
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store original class name if different from table name
|
||||||
|
if entity.name != tableName {
|
||||||
|
if table.Metadata == nil {
|
||||||
|
table.Metadata = make(map[string]any)
|
||||||
|
}
|
||||||
|
table.Metadata["class_name"] = entity.name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, field := range entity.fields {
|
||||||
|
// Skip relation fields (they'll be handled in relationship resolution)
|
||||||
|
if r.isRelationField(field) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
column := r.fieldToColumn(field, table)
|
||||||
|
if column != nil {
|
||||||
|
table.Columns[column.Name] = column
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return table
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseEntityOptions parses @Entity decorator options
|
||||||
|
func (r *Reader) parseEntityOptions(decorator string) map[string]string {
|
||||||
|
options := make(map[string]string)
|
||||||
|
|
||||||
|
// Extract content between parentheses
|
||||||
|
start := strings.Index(decorator, "(")
|
||||||
|
end := strings.LastIndex(decorator, ")")
|
||||||
|
|
||||||
|
if start == -1 || end == -1 || start >= end {
|
||||||
|
return options
|
||||||
|
}
|
||||||
|
|
||||||
|
content := decorator[start+1 : end]
|
||||||
|
|
||||||
|
// Skip if empty @Entity()
|
||||||
|
if strings.TrimSpace(content) == "" {
|
||||||
|
return options
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse name: "value" or name: 'value'
|
||||||
|
nameRegex := regexp.MustCompile(`name:\s*["']([^"']+)["']`)
|
||||||
|
if matches := nameRegex.FindStringSubmatch(content); matches != nil {
|
||||||
|
options["name"] = matches[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse schema: "value"
|
||||||
|
schemaRegex := regexp.MustCompile(`schema:\s*["']([^"']+)["']`)
|
||||||
|
if matches := schemaRegex.FindStringSubmatch(content); matches != nil {
|
||||||
|
options["schema"] = matches[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse database: "value"
|
||||||
|
databaseRegex := regexp.MustCompile(`database:\s*["']([^"']+)["']`)
|
||||||
|
if matches := databaseRegex.FindStringSubmatch(content); matches != nil {
|
||||||
|
options["database"] = matches[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse engine: "value"
|
||||||
|
engineRegex := regexp.MustCompile(`engine:\s*["']([^"']+)["']`)
|
||||||
|
if matches := engineRegex.FindStringSubmatch(content); matches != nil {
|
||||||
|
options["engine"] = matches[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
return options
|
||||||
|
}
|
||||||
|
|
||||||
|
// isRelationField checks if a field is a relation field
|
||||||
|
func (r *Reader) isRelationField(field fieldInfo) bool {
|
||||||
|
for _, decorator := range field.decorators {
|
||||||
|
if strings.Contains(decorator, "@ManyToOne") ||
|
||||||
|
strings.Contains(decorator, "@OneToMany") ||
|
||||||
|
strings.Contains(decorator, "@ManyToMany") ||
|
||||||
|
strings.Contains(decorator, "@OneToOne") {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// fieldToColumn converts a field to a column
|
||||||
|
func (r *Reader) fieldToColumn(field fieldInfo, table *models.Table) *models.Column {
|
||||||
|
column := models.InitColumn(field.name, table.Name, table.Schema)
|
||||||
|
|
||||||
|
// Map TypeScript type to SQL type
|
||||||
|
column.Type = r.typeScriptTypeToSQL(field.typeName)
|
||||||
|
|
||||||
|
// Default to NOT NULL
|
||||||
|
column.NotNull = true
|
||||||
|
|
||||||
|
// Parse decorators
|
||||||
|
for _, decorator := range field.decorators {
|
||||||
|
r.parseColumnDecorator(decorator, column, table)
|
||||||
|
}
|
||||||
|
|
||||||
|
return column
|
||||||
|
}
|
||||||
|
|
||||||
|
// typeScriptTypeToSQL converts TypeScript types to SQL types
|
||||||
|
func (r *Reader) typeScriptTypeToSQL(tsType string) string {
|
||||||
|
// Remove array brackets and optional markers
|
||||||
|
tsType = strings.TrimSuffix(tsType, "[]")
|
||||||
|
tsType = strings.TrimSuffix(tsType, " | null")
|
||||||
|
|
||||||
|
typeMap := map[string]string{
|
||||||
|
"string": "text",
|
||||||
|
"number": "integer",
|
||||||
|
"boolean": "boolean",
|
||||||
|
"Date": "timestamp",
|
||||||
|
"any": "jsonb",
|
||||||
|
}
|
||||||
|
|
||||||
|
for tsPattern, sqlType := range typeMap {
|
||||||
|
if strings.Contains(tsType, tsPattern) {
|
||||||
|
return sqlType
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default to text
|
||||||
|
return "text"
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseColumnDecorator parses a column decorator
|
||||||
|
func (r *Reader) parseColumnDecorator(decorator string, column *models.Column, table *models.Table) {
|
||||||
|
// @PrimaryGeneratedColumn
|
||||||
|
if strings.HasPrefix(decorator, "@PrimaryGeneratedColumn") {
|
||||||
|
column.IsPrimaryKey = true
|
||||||
|
column.NotNull = true
|
||||||
|
|
||||||
|
if strings.Contains(decorator, "'uuid'") {
|
||||||
|
column.Type = "uuid"
|
||||||
|
column.Default = "gen_random_uuid()"
|
||||||
|
} else if strings.Contains(decorator, "'increment'") || strings.Contains(decorator, "()") {
|
||||||
|
column.AutoIncrement = true
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// @Column
|
||||||
|
if strings.HasPrefix(decorator, "@Column") {
|
||||||
|
r.parseColumnOptions(decorator, column, table)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// @CreateDateColumn
|
||||||
|
if strings.HasPrefix(decorator, "@CreateDateColumn") {
|
||||||
|
column.Type = "timestamp"
|
||||||
|
column.Default = "now()"
|
||||||
|
column.NotNull = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// @UpdateDateColumn
|
||||||
|
if strings.HasPrefix(decorator, "@UpdateDateColumn") {
|
||||||
|
column.Type = "timestamp"
|
||||||
|
column.NotNull = true
|
||||||
|
if column.Comment != "" {
|
||||||
|
column.Comment += "; auto-update"
|
||||||
|
} else {
|
||||||
|
column.Comment = "auto-update"
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseColumnOptions parses @Column decorator options
|
||||||
|
func (r *Reader) parseColumnOptions(decorator string, column *models.Column, table *models.Table) {
|
||||||
|
// Extract content between parentheses
|
||||||
|
start := strings.Index(decorator, "(")
|
||||||
|
end := strings.LastIndex(decorator, ")")
|
||||||
|
|
||||||
|
if start == -1 || end == -1 || start >= end {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
content := decorator[start+1 : end]
|
||||||
|
|
||||||
|
// Check for shorthand type: @Column('text')
|
||||||
|
if strings.HasPrefix(content, "'") || strings.HasPrefix(content, "\"") {
|
||||||
|
typeStr := strings.Trim(content, "'\"`")
|
||||||
|
column.Type = typeStr
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse options object
|
||||||
|
if strings.Contains(content, "type:") {
|
||||||
|
typeRegex := regexp.MustCompile(`type:\s*['"]([^'"]+)['"]`)
|
||||||
|
if matches := typeRegex.FindStringSubmatch(content); matches != nil {
|
||||||
|
column.Type = matches[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.Contains(content, "nullable: true") || strings.Contains(content, "nullable:true") {
|
||||||
|
column.NotNull = false
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.Contains(content, "unique: true") || strings.Contains(content, "unique:true") {
|
||||||
|
uniqueConstraint := models.InitConstraint(
|
||||||
|
fmt.Sprintf("uq_%s", column.Name),
|
||||||
|
models.UniqueConstraint,
|
||||||
|
)
|
||||||
|
uniqueConstraint.Schema = table.Schema
|
||||||
|
uniqueConstraint.Table = table.Name
|
||||||
|
uniqueConstraint.Columns = []string{column.Name}
|
||||||
|
		table.Constraints[uniqueConstraint.Name] = uniqueConstraint
	}

	if strings.Contains(content, "default:") {
		defaultRegex := regexp.MustCompile(`default:\s*['"]?([^,}'"]+)['"]?`)
		if matches := defaultRegex.FindStringSubmatch(content); matches != nil {
			defaultValue := strings.TrimSpace(matches[1])
			defaultValue = strings.Trim(defaultValue, "'\"")
			column.Default = defaultValue
		}
	}
}

// resolveRelationships resolves TypeORM relationships
func (r *Reader) resolveRelationships(entities []entityInfo, tableMap map[string]*models.Table, schema *models.Schema) {
	// Track M2M relations that need join tables
	type m2mRelation struct {
		ownerEntity  string
		targetEntity string
		ownerField   string
	}
	m2mRelations := make([]m2mRelation, 0)

	for _, entity := range entities {
		table := tableMap[entity.name]
		if table == nil {
			continue
		}

		for _, field := range entity.fields {
			// Handle @ManyToOne relations
			if r.hasDecorator(field, "@ManyToOne") {
				r.createManyToOneConstraint(field, entity.name, table, tableMap)
			}

			// Track @ManyToMany relations with @JoinTable
			if r.hasDecorator(field, "@ManyToMany") && r.hasDecorator(field, "@JoinTable") {
				targetEntity := r.extractRelationTarget(field)
				if targetEntity != "" {
					m2mRelations = append(m2mRelations, m2mRelation{
						ownerEntity:  entity.name,
						targetEntity: targetEntity,
						ownerField:   field.name,
					})
				}
			}
		}
	}

	// Create join tables for M2M relations
	for _, rel := range m2mRelations {
		r.createManyToManyJoinTable(rel.ownerEntity, rel.targetEntity, tableMap, schema)
	}
}

// hasDecorator checks if a field has a specific decorator
func (r *Reader) hasDecorator(field fieldInfo, decoratorName string) bool {
	for _, decorator := range field.decorators {
		if strings.HasPrefix(decorator, decoratorName) {
			return true
		}
	}
	return false
}

// extractRelationTarget extracts the target entity from a relation decorator
func (r *Reader) extractRelationTarget(field fieldInfo) string {
	// Remove array brackets from type
	targetType := strings.TrimSuffix(field.typeName, "[]")
	targetType = strings.TrimSpace(targetType)
	return targetType
}

// createManyToOneConstraint creates a foreign key constraint for @ManyToOne
func (r *Reader) createManyToOneConstraint(field fieldInfo, entityName string, table *models.Table, tableMap map[string]*models.Table) {
	targetEntity := r.extractRelationTarget(field)
	if targetEntity == "" {
		return
	}

	// Get target table to find its PK
	targetTable := tableMap[targetEntity]
	if targetTable == nil {
		return
	}

	targetPK := r.getPrimaryKeyColumn(targetTable)
	if targetPK == nil {
		return
	}

	// Create FK column
	fkColumnName := fmt.Sprintf("%sId", field.name)
	fkColumn := models.InitColumn(fkColumnName, table.Name, table.Schema)
	fkColumn.Type = targetPK.Type

	// Check if nullable option is set in @ManyToOne decorator
	isNullable := false
	for _, decorator := range field.decorators {
		if strings.Contains(decorator, "nullable: true") || strings.Contains(decorator, "nullable:true") {
			isNullable = true
			break
		}
	}
	fkColumn.NotNull = !isNullable

	table.Columns[fkColumnName] = fkColumn

	// Create FK constraint
	constraint := models.InitConstraint(
		fmt.Sprintf("fk_%s_%s", entityName, field.name),
		models.ForeignKeyConstraint,
	)
	constraint.Schema = table.Schema
	constraint.Table = table.Name
	constraint.Columns = []string{fkColumnName}
	constraint.ReferencedSchema = "public"
	constraint.ReferencedTable = targetEntity
	constraint.ReferencedColumns = []string{targetPK.Name}
	constraint.OnDelete = "CASCADE"

	table.Constraints[constraint.Name] = constraint
}

// createManyToManyJoinTable creates a join table for M2M relations
func (r *Reader) createManyToManyJoinTable(entity1, entity2 string, tableMap map[string]*models.Table, schema *models.Schema) {
	// TypeORM naming convention: entity1_entity2_entity1field
	// We'll simplify to entity1_entity2
	joinTableName := fmt.Sprintf("%s_%s", strings.ToLower(entity1), strings.ToLower(entity2))

	// Check if join table already exists
	if _, exists := tableMap[joinTableName]; exists {
		return
	}

	// Get PKs from both tables
	table1 := tableMap[entity1]
	table2 := tableMap[entity2]
	if table1 == nil || table2 == nil {
		return
	}

	pk1 := r.getPrimaryKeyColumn(table1)
	pk2 := r.getPrimaryKeyColumn(table2)
	if pk1 == nil || pk2 == nil {
		return
	}

	// Create join table
	joinTable := models.InitTable(joinTableName, "public")

	// Create FK columns
	fkCol1Name := fmt.Sprintf("%sId", strings.ToLower(entity1))
	fkCol1 := models.InitColumn(fkCol1Name, joinTableName, "public")
	fkCol1.Type = pk1.Type
	fkCol1.NotNull = true
	fkCol1.IsPrimaryKey = true
	joinTable.Columns[fkCol1Name] = fkCol1

	fkCol2Name := fmt.Sprintf("%sId", strings.ToLower(entity2))
	fkCol2 := models.InitColumn(fkCol2Name, joinTableName, "public")
	fkCol2.Type = pk2.Type
	fkCol2.NotNull = true
	fkCol2.IsPrimaryKey = true
	joinTable.Columns[fkCol2Name] = fkCol2

	// Create composite PK constraint
	pkConstraint := models.InitConstraint(
		fmt.Sprintf("pk_%s", joinTableName),
		models.PrimaryKeyConstraint,
	)
	pkConstraint.Schema = "public"
	pkConstraint.Table = joinTableName
	pkConstraint.Columns = []string{fkCol1Name, fkCol2Name}
	joinTable.Constraints[pkConstraint.Name] = pkConstraint

	// Create FK constraints
	fk1 := models.InitConstraint(
		fmt.Sprintf("fk_%s_%s", joinTableName, entity1),
		models.ForeignKeyConstraint,
	)
	fk1.Schema = "public"
	fk1.Table = joinTableName
	fk1.Columns = []string{fkCol1Name}
	fk1.ReferencedSchema = "public"
	fk1.ReferencedTable = entity1
	fk1.ReferencedColumns = []string{pk1.Name}
	fk1.OnDelete = "CASCADE"
	joinTable.Constraints[fk1.Name] = fk1

	fk2 := models.InitConstraint(
		fmt.Sprintf("fk_%s_%s", joinTableName, entity2),
		models.ForeignKeyConstraint,
	)
	fk2.Schema = "public"
	fk2.Table = joinTableName
	fk2.Columns = []string{fkCol2Name}
	fk2.ReferencedSchema = "public"
	fk2.ReferencedTable = entity2
	fk2.ReferencedColumns = []string{pk2.Name}
	fk2.OnDelete = "CASCADE"
	joinTable.Constraints[fk2.Name] = fk2

	// Add join table to schema
	schema.Tables = append(schema.Tables, joinTable)
	tableMap[joinTableName] = joinTable
}

// getPrimaryKeyColumn returns the primary key column of a table
func (r *Reader) getPrimaryKeyColumn(table *models.Table) *models.Column {
	if table == nil {
		return nil
	}

	for _, col := range table.Columns {
		if col.IsPrimaryKey {
			return col
		}
	}

	return nil
}
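The naming conventions used by the resolver above are easiest to see with concrete values. Below is a minimal, self-contained sketch using hypothetical `User`/`Profile`/`Role` entity names; it only reproduces the `fmt.Sprintf` patterns from the code above and does not invoke the reader itself:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// @ManyToOne: FK column and constraint names derived from the owning field.
	entity, field := "User", "profile"
	fmt.Println(fmt.Sprintf("%sId", field))             // FK column:     profileId
	fmt.Println(fmt.Sprintf("fk_%s_%s", entity, field)) // FK constraint: fk_User_profile

	// @ManyToMany + @JoinTable: join table, composite PK, and join column names.
	owner, target := "User", "Role"
	joinTable := fmt.Sprintf("%s_%s", strings.ToLower(owner), strings.ToLower(target))
	fmt.Println(joinTable)                                     // join table:   user_role
	fmt.Println(fmt.Sprintf("pk_%s", joinTable))               // composite PK: pk_user_role
	fmt.Println(fmt.Sprintf("%sId", strings.ToLower(owner)))   // join column:  userId
	fmt.Println(fmt.Sprintf("%sId", strings.ToLower(target)))  // join column:  roleId
}
```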
159 pkg/readers/yaml/README.md Normal file
@@ -0,0 +1,159 @@
# YAML Reader

Reads database schema definitions from YAML files.

## Overview

The YAML Reader parses YAML files that define database schemas in RelSpec's canonical YAML format and converts them into RelSpec's internal database model representation.

## Features

- Reads RelSpec's standard YAML schema format
- Human-readable alternative to JSON format
- Supports complete schema representation including:
  - Databases and schemas
  - Tables, columns, and data types
  - Constraints (PK, FK, unique, check)
  - Indexes
  - Relationships
  - Views and sequences

## Usage

### Basic Example

```go
package main

import (
	"fmt"

	"git.warky.dev/wdevs/relspecgo/pkg/readers"
	"git.warky.dev/wdevs/relspecgo/pkg/readers/yaml"
)

func main() {
	options := &readers.ReaderOptions{
		FilePath: "/path/to/schema.yaml",
	}

	reader := yaml.NewReader(options)
	db, err := reader.ReadDatabase()
	if err != nil {
		panic(err)
	}

	fmt.Printf("Found %d schemas\n", len(db.Schemas))
}
```

### CLI Example

```bash
# Read YAML schema and convert to GORM models
relspec --input yaml --in-file schema.yaml --output gorm --out-file models.go

# Convert YAML to PostgreSQL DDL
relspec --input yaml --in-file database.yaml --output pgsql --out-file schema.sql

# Transform YAML to JSON
relspec --input yaml --in-file schema.yaml --output json --out-file schema.json
```

## Example YAML Schema

```yaml
name: myapp
database_type: postgresql
schemas:
  - name: public
    tables:
      - name: users
        schema: public
        columns:
          id:
            name: id
            type: bigint
            not_null: true
            is_primary_key: true
            auto_increment: true
            sequence: 1
          username:
            name: username
            type: varchar
            length: 50
            not_null: true
            sequence: 2
          email:
            name: email
            type: varchar
            length: 100
            not_null: true
            sequence: 3
        constraints:
          pk_users:
            name: pk_users
            type: PRIMARY KEY
            columns:
              - id
          uq_users_username:
            name: uq_users_username
            type: UNIQUE
            columns:
              - username
        indexes:
          idx_users_email:
            name: idx_users_email
            columns:
              - email
            unique: false
            type: btree
      - name: posts
        schema: public
        columns:
          id:
            name: id
            type: bigint
            not_null: true
            is_primary_key: true
            sequence: 1
          user_id:
            name: user_id
            type: bigint
            not_null: true
            sequence: 2
          title:
            name: title
            type: varchar
            length: 200
            not_null: true
            sequence: 3
        constraints:
          fk_posts_user_id:
            name: fk_posts_user_id
            type: FOREIGN KEY
            columns:
              - user_id
            referenced_table: users
            referenced_schema: public
            referenced_columns:
              - id
            on_delete: CASCADE
            on_update: NO ACTION
```

## Schema Structure

The YAML format mirrors RelSpec's internal model structure with human-readable syntax:

- Database level: `name`, `database_type`, `schemas`
- Schema level: `name`, `tables`, `views`, `sequences`
- Table level: `name`, `schema`, `columns`, `constraints`, `indexes`, `relationships`
- Column level: `name`, `type`, `length`, `not_null`, `default`, etc.

## Notes

- YAML format is more human-readable than JSON
- Ideal for manual editing and version control
- Comments are supported in YAML
- Preserves complete schema information
- Can be used for configuration and documentation
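To complement the Schema Structure outline above, here is a rough traversal sketch that extends the Basic Example. The `Schemas`, `Tables`, `Columns`, `Name`, and `Type` fields are assumed from the examples in this README and from the reader code elsewhere in this change, so treat it as illustrative rather than canonical:

```go
// Continues from the Basic Example above, after reader.ReadDatabase() has returned db.
for _, schema := range db.Schemas {
	fmt.Printf("schema %s\n", schema.Name)
	for _, table := range schema.Tables {
		fmt.Printf("  table %s (%d columns)\n", table.Name, len(table.Columns))
		for name, col := range table.Columns {
			fmt.Printf("    %s %s\n", name, col.Type)
		}
	}
}
```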
129 pkg/writers/bun/README.md Normal file
@@ -0,0 +1,129 @@
# Bun Writer

Generates Go source files with Bun model definitions from database schema information.

## Overview

The Bun Writer converts RelSpec's internal database model representation into Go source code with Bun struct definitions, complete with proper tags, relationships, and table configuration.

## Features

- Generates Bun-compatible Go structs
- Creates proper `bun` struct tags
- Adds relationship fields
- Supports both single-file and multi-file output
- Maps SQL types to Go types
- Handles nullable fields with sql.Null* types
- Generates table aliases

## Usage

### Basic Example

```go
package main

import (
	"git.warky.dev/wdevs/relspecgo/pkg/models"
	"git.warky.dev/wdevs/relspecgo/pkg/writers"
	"git.warky.dev/wdevs/relspecgo/pkg/writers/bun"
)

func main() {
	options := &writers.WriterOptions{
		OutputPath:  "models.go",
		PackageName: "models",
	}

	writer := bun.NewWriter(options)
	err := writer.WriteDatabase(db)
	if err != nil {
		panic(err)
	}
}
```

### CLI Examples

```bash
# Generate Bun models from PostgreSQL database
relspec --input pgsql \
  --conn "postgres://localhost/mydb" \
  --output bun \
  --out-file models.go \
  --package models

# Convert GORM models to Bun
relspec --input gorm --in-file gorm_models.go --output bun --out-file bun_models.go

# Multi-file output
relspec --input json --in-file schema.json --output bun --out-file models/
```

## Generated Code Example

```go
package models

import (
	"time"
	"database/sql"

	"github.com/uptrace/bun"
)

type User struct {
	bun.BaseModel `bun:"table:users,alias:u"`

	ID        int64          `bun:"id,pk,autoincrement" json:"id"`
	Username  string         `bun:"username,notnull,unique" json:"username"`
	Email     string         `bun:"email,notnull" json:"email"`
	Bio       sql.NullString `bun:"bio" json:"bio,omitempty"`
	CreatedAt time.Time      `bun:"created_at,notnull,default:now()" json:"created_at"`

	// Relationships
	Posts []*Post `bun:"rel:has-many,join:id=user_id" json:"posts,omitempty"`
}

type Post struct {
	bun.BaseModel `bun:"table:posts,alias:p"`

	ID      int64          `bun:"id,pk" json:"id"`
	UserID  int64          `bun:"user_id,notnull" json:"user_id"`
	Title   string         `bun:"title,notnull" json:"title"`
	Content sql.NullString `bun:"content" json:"content,omitempty"`

	// Belongs to
	User *User `bun:"rel:belongs-to,join:user_id=id" json:"user,omitempty"`
}
```

## Supported Bun Tags

- `table` - Table name and alias
- `column` - Column name (auto-derived if not specified)
- `pk` - Primary key
- `autoincrement` - Auto-increment
- `notnull` - NOT NULL constraint
- `unique` - Unique constraint
- `default` - Default value
- `rel` - Relationship definition
- `type` - Explicit SQL type

## Type Mapping

| SQL Type | Go Type | Nullable Type |
|----------|---------|---------------|
| bigint | int64 | sql.NullInt64 |
| integer | int | sql.NullInt32 |
| varchar, text | string | sql.NullString |
| boolean | bool | sql.NullBool |
| timestamp | time.Time | sql.NullTime |
| numeric | float64 | sql.NullFloat64 |

## Notes

- Model names are derived from table names (singularized, PascalCase)
- Table aliases are auto-generated from table names
- Multi-file mode: one file per table named `sql_{schema}_{table}.go`
- Generated code is auto-formatted
- JSON tags are automatically added
@@ -41,12 +41,7 @@ func NewWriter(options *writers.WriterOptions) *Writer {
 // WriteDatabase writes a complete database as Bun models
 func (w *Writer) WriteDatabase(db *models.Database) error {
 	// Check if multi-file mode is enabled
-	multiFile := false
-	if w.options.Metadata != nil {
-		if mf, ok := w.options.Metadata["multi_file"].(bool); ok {
-			multiFile = mf
-		}
-	}
+	multiFile := w.shouldUseMultiFile()
 
 	if multiFile {
 		return w.writeMultiFile(db)
@@ -346,6 +341,41 @@ func (w *Writer) writeOutput(content string) error {
 	return nil
 }
 
+// shouldUseMultiFile determines whether to use multi-file mode based on metadata or output path
+func (w *Writer) shouldUseMultiFile() bool {
+	// Check if multi_file is explicitly set in metadata
+	if w.options.Metadata != nil {
+		if mf, ok := w.options.Metadata["multi_file"].(bool); ok {
+			return mf
+		}
+	}
+
+	// Auto-detect based on output path
+	if w.options.OutputPath == "" {
+		// No output path means stdout (single file)
+		return false
+	}
+
+	// Check if path ends with .go (explicit file)
+	if strings.HasSuffix(w.options.OutputPath, ".go") {
+		return false
+	}
+
+	// Check if path ends with directory separator
+	if strings.HasSuffix(w.options.OutputPath, "/") || strings.HasSuffix(w.options.OutputPath, "\\") {
+		return true
+	}
+
+	// Check if path exists and is a directory
+	info, err := os.Stat(w.options.OutputPath)
+	if err == nil && info.IsDir() {
+		return true
+	}
+
+	// Default to single file for ambiguous cases
+	return false
+}
+
 // createDatabaseRef creates a shallow copy of database without schemas to avoid circular references
 func (w *Writer) createDatabaseRef(db *models.Database) *models.Database {
 	return &models.Database{
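A short usage sketch of the refactored behaviour (the `multi_file` metadata key and the path heuristics are the ones read by `shouldUseMultiFile` above; `db` stands in for a previously loaded `*models.Database`):

```go
// Explicit override: metadata wins over any output-path heuristic.
options := &writers.WriterOptions{
	OutputPath: "models/", // a trailing separator alone would also select multi-file mode
	Metadata: map[string]interface{}{
		"multi_file": true,
	},
}
writer := bun.NewWriter(options)
if err := writer.WriteDatabase(db); err != nil {
	panic(err)
}
```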
161 pkg/writers/dbml/README.md Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
# DBML Writer
|
||||||
|
|
||||||
|
Generates Database Markup Language (DBML) files from database schema information.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The DBML Writer converts RelSpec's internal database model representation into DBML syntax, suitable for use with dbdiagram.io and other DBML-compatible tools.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Generates DBML syntax
|
||||||
|
- Creates table definitions with columns
|
||||||
|
- Defines relationships
|
||||||
|
- Includes indexes
|
||||||
|
- Adds notes and documentation
|
||||||
|
- Supports enums
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers/dbml"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
options := &writers.WriterOptions{
|
||||||
|
OutputPath: "schema.dbml",
|
||||||
|
}
|
||||||
|
|
||||||
|
writer := dbml.NewWriter(options)
|
||||||
|
err := writer.WriteDatabase(db)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Examples
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate DBML from PostgreSQL database
|
||||||
|
relspec --input pgsql \
|
||||||
|
--conn "postgres://localhost/mydb" \
|
||||||
|
--output dbml \
|
||||||
|
--out-file schema.dbml
|
||||||
|
|
||||||
|
# Convert GORM models to DBML
|
||||||
|
relspec --input gorm --in-file models.go --output dbml --out-file database.dbml
|
||||||
|
|
||||||
|
# Convert JSON to DBML for visualization
|
||||||
|
relspec --input json --in-file schema.json --output dbml --out-file diagram.dbml
|
||||||
|
```
|
||||||
|
|
||||||
|
## Generated DBML Example
|
||||||
|
|
||||||
|
```dbml
|
||||||
|
Project MyDatabase {
|
||||||
|
database_type: 'PostgreSQL'
|
||||||
|
}
|
||||||
|
|
||||||
|
Table users {
|
||||||
|
id bigserial [pk, increment]
|
||||||
|
username varchar(50) [not null, unique]
|
||||||
|
email varchar(100) [not null]
|
||||||
|
bio text [null]
|
||||||
|
created_at timestamp [not null, default: `now()`]
|
||||||
|
|
||||||
|
Note: 'Users table'
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
email [name: 'idx_users_email']
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Table posts {
|
||||||
|
id bigserial [pk, increment]
|
||||||
|
user_id bigint [not null]
|
||||||
|
title varchar(200) [not null]
|
||||||
|
content text [null]
|
||||||
|
created_at timestamp [default: `now()`]
|
||||||
|
|
||||||
|
indexes {
|
||||||
|
user_id [name: 'idx_posts_user_id']
|
||||||
|
(user_id, created_at) [name: 'idx_posts_user_created']
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ref: posts.user_id > users.id [delete: cascade, update: no action]
|
||||||
|
```
|
||||||
|
|
||||||
|
## DBML Features
|
||||||
|
|
||||||
|
### Table Definitions
|
||||||
|
```dbml
|
||||||
|
Table table_name {
|
||||||
|
column_name type [attributes]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Column Attributes
|
||||||
|
- `pk` - Primary key
|
||||||
|
- `increment` - Auto-increment
|
||||||
|
- `not null` - NOT NULL constraint
|
||||||
|
- `null` - Nullable (explicit)
|
||||||
|
- `unique` - Unique constraint
|
||||||
|
- `default: value` - Default value
|
||||||
|
- `note: 'text'` - Column note
|
||||||
|
|
||||||
|
### Relationships
|
||||||
|
```dbml
|
||||||
|
Ref: table1.column > table2.column
|
||||||
|
Ref: table1.column < table2.column
|
||||||
|
Ref: table1.column - table2.column
|
||||||
|
```
|
||||||
|
|
||||||
|
Relationship types:
|
||||||
|
- `>` - Many-to-one
|
||||||
|
- `<` - One-to-many
|
||||||
|
- `-` - One-to-one
|
||||||
|
|
||||||
|
Relationship actions:
|
||||||
|
```dbml
|
||||||
|
Ref: posts.user_id > users.id [delete: cascade, update: restrict]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Indexes
|
||||||
|
```dbml
|
||||||
|
indexes {
|
||||||
|
column_name
|
||||||
|
(column1, column2) [name: 'idx_name', unique]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Type Mapping
|
||||||
|
|
||||||
|
| SQL Type | DBML Type |
|
||||||
|
|----------|-----------|
|
||||||
|
| bigint | bigint |
|
||||||
|
| integer | int |
|
||||||
|
| varchar(n) | varchar(n) |
|
||||||
|
| text | text |
|
||||||
|
| boolean | boolean |
|
||||||
|
| timestamp | timestamp |
|
||||||
|
| date | date |
|
||||||
|
| json | json |
|
||||||
|
| uuid | uuid |
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- DBML is designed for database visualization
|
||||||
|
- Can be imported into dbdiagram.io
|
||||||
|
- Human-readable format
|
||||||
|
- Schema names can be included in table names
|
||||||
|
- Comments and notes are preserved
|
||||||
|
- Ideal for documentation and sharing designs
|
||||||
111 pkg/writers/dctx/README.md Normal file
@@ -0,0 +1,111 @@
# DCTX Writer

Generates Clarion database dictionary (DCTX) files from database schema information.

## Overview

The DCTX Writer converts RelSpec's internal database model representation into Clarion dictionary XML format, used by the Clarion development platform.

## Features

- Generates DCTX XML format
- Creates file (table) definitions
- Defines fields (columns) with Clarion types
- Includes keys (indexes)
- Handles relationships

## Usage

### Basic Example

```go
package main

import (
	"git.warky.dev/wdevs/relspecgo/pkg/models"
	"git.warky.dev/wdevs/relspecgo/pkg/writers"
	"git.warky.dev/wdevs/relspecgo/pkg/writers/dctx"
)

func main() {
	options := &writers.WriterOptions{
		OutputPath: "database.dctx",
	}

	writer := dctx.NewWriter(options)
	err := writer.WriteDatabase(db)
	if err != nil {
		panic(err)
	}
}
```

### CLI Examples

```bash
# Generate DCTX from PostgreSQL database (for Clarion migration)
relspec --input pgsql \
  --conn "postgres://localhost/mydb" \
  --output dctx \
  --out-file app.dctx

# Convert GORM models to DCTX
relspec --input gorm --in-file models.go --output dctx --out-file legacy.dctx

# Convert JSON schema to DCTX
relspec --input json --in-file schema.json --output dctx --out-file database.dctx
```

## Type Mapping

Converts standard SQL types to Clarion types:

| SQL Type | Clarion Type | Notes |
|----------|--------------|-------|
| VARCHAR(n) | STRING(n) | Fixed-length string |
| TEXT | STRING | Variable length |
| INTEGER | LONG | 32-bit integer |
| BIGINT | DECIMAL(20,0) | Large integer |
| SMALLINT | SHORT | 16-bit integer |
| NUMERIC(p,s) | DECIMAL(p,s) | Decimal number |
| REAL, FLOAT | REAL | Floating point |
| BOOLEAN | BYTE | 0/1 value |
| DATE | DATE | Date field |
| TIME | TIME | Time field |
| TIMESTAMP | LONG | Unix timestamp |

## DCTX Structure

DCTX files are XML-based with this structure:

```xml
<?xml version="1.0"?>
<dictionary>
  <file name="USERS" driver="TOPSPEED">
    <record>
      <field name="ID" type="LONG" />
      <field name="USERNAME" type="STRING" bytes="50" />
      <field name="EMAIL" type="STRING" bytes="100" />
    </record>
    <key name="KEY_PRIMARY" primary="true">
      <field name="ID" />
    </key>
  </file>
</dictionary>
```

## Dictionary Elements

- File definitions (equivalent to tables)
- Field definitions with Clarion-specific types
- Key definitions (primary and foreign)
- Relationships between files
- Driver specifications (TOPSPEED, SQL, etc.)

## Notes

- DCTX is specific to Clarion development
- Useful for legacy system integration
- Field names are typically uppercase in Clarion
- Supports Clarion-specific attributes
- Can be imported into Clarion IDE
182 pkg/writers/drawdb/README.md Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
# DrawDB Writer
|
||||||
|
|
||||||
|
Generates DrawDB-compatible JSON files from database schema information.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The DrawDB Writer converts RelSpec's internal database model representation into JSON format compatible with DrawDB, a free online database design tool.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Generates DrawDB JSON format
|
||||||
|
- Creates table and field definitions
|
||||||
|
- Defines relationships
|
||||||
|
- Includes visual layout information
|
||||||
|
- Preserves constraints and indexes
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers/drawdb"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
options := &writers.WriterOptions{
|
||||||
|
OutputPath: "diagram.json",
|
||||||
|
}
|
||||||
|
|
||||||
|
writer := drawdb.NewWriter(options)
|
||||||
|
err := writer.WriteDatabase(db)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Examples
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate DrawDB diagram from PostgreSQL database
|
||||||
|
relspec --input pgsql \
|
||||||
|
--conn "postgres://localhost/mydb" \
|
||||||
|
--output drawdb \
|
||||||
|
--out-file diagram.json
|
||||||
|
|
||||||
|
# Convert GORM models to DrawDB for visualization
|
||||||
|
relspec --input gorm --in-file models.go --output drawdb --out-file design.json
|
||||||
|
|
||||||
|
# Convert JSON schema to DrawDB
|
||||||
|
relspec --input json --in-file schema.json --output drawdb --out-file diagram.json
|
||||||
|
```
|
||||||
|
|
||||||
|
## Generated JSON Example
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"version": "1.0",
|
||||||
|
"database": "PostgreSQL",
|
||||||
|
"tables": [
|
||||||
|
{
|
||||||
|
"id": "1",
|
||||||
|
"name": "users",
|
||||||
|
"x": 100,
|
||||||
|
"y": 100,
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"id": "1",
|
||||||
|
"name": "id",
|
||||||
|
"type": "BIGINT",
|
||||||
|
"primary": true,
|
||||||
|
"autoIncrement": true,
|
||||||
|
"notNull": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "2",
|
||||||
|
"name": "username",
|
||||||
|
"type": "VARCHAR",
|
||||||
|
"size": 50,
|
||||||
|
"notNull": true,
|
||||||
|
"unique": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "3",
|
||||||
|
"name": "email",
|
||||||
|
"type": "VARCHAR",
|
||||||
|
"size": 100,
|
||||||
|
"notNull": true
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"indexes": [
|
||||||
|
{
|
||||||
|
"name": "idx_users_email",
|
||||||
|
"fields": ["email"]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "2",
|
||||||
|
"name": "posts",
|
||||||
|
"x": 400,
|
||||||
|
"y": 100,
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"id": "1",
|
||||||
|
"name": "id",
|
||||||
|
"type": "BIGINT",
|
||||||
|
"primary": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "2",
|
||||||
|
"name": "user_id",
|
||||||
|
"type": "BIGINT",
|
||||||
|
"notNull": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "3",
|
||||||
|
"name": "title",
|
||||||
|
"type": "VARCHAR",
|
||||||
|
"size": 200,
|
||||||
|
"notNull": true
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"relationships": [
|
||||||
|
{
|
||||||
|
"id": "1",
|
||||||
|
"source": "2",
|
||||||
|
"target": "1",
|
||||||
|
"sourceField": "user_id",
|
||||||
|
"targetField": "id",
|
||||||
|
"type": "many-to-one",
|
||||||
|
"onDelete": "CASCADE"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## DrawDB Features
|
||||||
|
|
||||||
|
### Table Properties
|
||||||
|
- `id` - Unique table identifier
|
||||||
|
- `name` - Table name
|
||||||
|
- `x`, `y` - Position in diagram
|
||||||
|
- `fields` - Array of field definitions
|
||||||
|
- `indexes` - Array of index definitions
|
||||||
|
|
||||||
|
### Field Properties
|
||||||
|
- `id` - Unique field identifier
|
||||||
|
- `name` - Field name
|
||||||
|
- `type` - Data type (BIGINT, VARCHAR, etc.)
|
||||||
|
- `size` - Length for string types
|
||||||
|
- `primary` - Primary key flag
|
||||||
|
- `notNull` - NOT NULL constraint
|
||||||
|
- `unique` - Unique constraint
|
||||||
|
- `autoIncrement` - Auto-increment flag
|
||||||
|
- `default` - Default value
|
||||||
|
|
||||||
|
### Relationship Properties
|
||||||
|
- `id` - Unique relationship identifier
|
||||||
|
- `source` - Source table ID
|
||||||
|
- `target` - Target table ID
|
||||||
|
- `sourceField` - Foreign key field
|
||||||
|
- `targetField` - Referenced field
|
||||||
|
- `type` - Relationship type (one-to-one, one-to-many, many-to-one)
|
||||||
|
- `onDelete` - Delete action
|
||||||
|
- `onUpdate` - Update action
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- DrawDB is available at drawdb.vercel.app
|
||||||
|
- Generated files can be imported for visual editing
|
||||||
|
- Visual positions (x, y) are auto-generated
|
||||||
|
- Ideal for creating ERD diagrams
|
||||||
|
- Supports modern database features
|
||||||
|
- Free and open-source tool
|
||||||
120 pkg/writers/drizzle/README.md Normal file
@@ -0,0 +1,120 @@
# Drizzle Writer

Generates TypeScript/JavaScript files with Drizzle ORM schema definitions from database schema information.

## Overview

The Drizzle Writer converts RelSpec's internal database model representation into TypeScript source code with Drizzle ORM schema definitions, including tables, columns, relationships, and constraints.

## Features

- Generates Drizzle-compatible TypeScript schema
- Supports PostgreSQL and MySQL schemas
- Creates table definitions with proper column types
- Generates relationship definitions
- Handles constraints and indexes
- Outputs formatted TypeScript code

## Usage

### Basic Example

```go
package main

import (
	"git.warky.dev/wdevs/relspecgo/pkg/models"
	"git.warky.dev/wdevs/relspecgo/pkg/writers"
	"git.warky.dev/wdevs/relspecgo/pkg/writers/drizzle"
)

func main() {
	options := &writers.WriterOptions{
		OutputPath: "schema.ts",
		Metadata: map[string]interface{}{
			"database_type": "postgresql", // or "mysql"
		},
	}

	writer := drizzle.NewWriter(options)
	err := writer.WriteDatabase(db)
	if err != nil {
		panic(err)
	}
}
```

### CLI Examples

```bash
# Generate Drizzle schema from PostgreSQL database
relspec --input pgsql \
  --conn "postgres://localhost/mydb" \
  --output drizzle \
  --out-file schema.ts

# Convert GORM models to Drizzle
relspec --input gorm --in-file models.go --output drizzle --out-file schema.ts

# Convert JSON schema to Drizzle
relspec --input json --in-file schema.json --output drizzle --out-file db/schema.ts
```

## Generated Code Example

```typescript
import { pgTable, serial, varchar, text, timestamp, integer } from 'drizzle-orm/pg-core';
import { relations } from 'drizzle-orm';

export const users = pgTable('users', {
  id: serial('id').primaryKey(),
  username: varchar('username', { length: 50 }).notNull().unique(),
  email: varchar('email', { length: 100 }).notNull(),
  bio: text('bio'),
  createdAt: timestamp('created_at').notNull().defaultNow(),
});

export const posts = pgTable('posts', {
  id: serial('id').primaryKey(),
  userId: integer('user_id').notNull().references(() => users.id, { onDelete: 'cascade' }),
  title: varchar('title', { length: 200 }).notNull(),
  content: text('content'),
});

export const usersRelations = relations(users, ({ many }) => ({
  posts: many(posts),
}));

export const postsRelations = relations(posts, ({ one }) => ({
  user: one(users, {
    fields: [posts.userId],
    references: [users.id],
  }),
}));
```

## Supported Column Types

### PostgreSQL
- `serial`, `bigserial` - Auto-increment integers
- `integer`, `bigint`, `smallint` - Integer types
- `varchar`, `text` - String types
- `boolean` - Boolean
- `timestamp`, `date`, `time` - Date/time types
- `json`, `jsonb` - JSON types
- `uuid` - UUID type

### MySQL
- `int`, `bigint`, `smallint` - Integer types
- `varchar`, `text` - String types
- `boolean` - Boolean
- `datetime`, `timestamp` - Date/time types
- `json` - JSON type

## Notes

- Table names and column names are preserved as-is
- Relationships are generated as separate relation definitions
- Constraint actions (CASCADE, etc.) are included in references
- Schema names other than 'public' are supported
- Output is formatted TypeScript code
221 pkg/writers/drizzle/template_data.go Normal file
@@ -0,0 +1,221 @@
|
|||||||
|
package drizzle
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TemplateData represents the data passed to the template for code generation
|
||||||
|
type TemplateData struct {
|
||||||
|
Imports []string
|
||||||
|
Enums []*EnumData
|
||||||
|
Tables []*TableData
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnumData represents an enum in the schema
|
||||||
|
type EnumData struct {
|
||||||
|
Name string // Enum name (PascalCase)
|
||||||
|
VarName string // Variable name for the enum (camelCase)
|
||||||
|
Values []string // Enum values
|
||||||
|
ValuesStr string // Comma-separated quoted values for pgEnum()
|
||||||
|
TypeUnion string // TypeScript union type (e.g., "'admin' | 'user' | 'guest'")
|
||||||
|
SchemaName string // Schema name
|
||||||
|
}
|
||||||
|
|
||||||
|
// TableData represents a table in the template
|
||||||
|
type TableData struct {
|
||||||
|
Name string // Table variable name (camelCase, e.g., users)
|
||||||
|
TableName string // Actual database table name (e.g., users)
|
||||||
|
TypeName string // TypeScript type name (PascalCase, e.g., Users)
|
||||||
|
Columns []*ColumnData // Column definitions
|
||||||
|
Indexes []*IndexData // Index definitions
|
||||||
|
Comment string // Table comment
|
||||||
|
SchemaName string // Schema name
|
||||||
|
NeedsSQLTag bool // Whether we need to import 'sql' from drizzle-orm
|
||||||
|
IndexColumnFields []string // Column field names used in indexes (for destructuring)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ColumnData represents a column in a table
|
||||||
|
type ColumnData struct {
|
||||||
|
Name string // Column name in database
|
||||||
|
FieldName string // Field name in TypeScript (camelCase)
|
||||||
|
DrizzleChain string // Complete Drizzle column chain (e.g., "integer('id').primaryKey()")
|
||||||
|
TypeScriptType string // TypeScript type for interface (e.g., "string", "number | null")
|
||||||
|
IsForeignKey bool // Whether this is a foreign key
|
||||||
|
ReferencesLine string // The .references() line if FK
|
||||||
|
Comment string // Column comment
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndexData represents an index definition
|
||||||
|
type IndexData struct {
|
||||||
|
Name string // Index name
|
||||||
|
Columns []string // Column names
|
||||||
|
IsUnique bool // Whether it's a unique index
|
||||||
|
Definition string // Complete index definition line
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTemplateData creates a new TemplateData
|
||||||
|
func NewTemplateData() *TemplateData {
|
||||||
|
return &TemplateData{
|
||||||
|
Imports: make([]string, 0),
|
||||||
|
Enums: make([]*EnumData, 0),
|
||||||
|
Tables: make([]*TableData, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddImport adds an import to the template data (deduplicates automatically)
|
||||||
|
func (td *TemplateData) AddImport(importLine string) {
|
||||||
|
// Check if already exists
|
||||||
|
for _, imp := range td.Imports {
|
||||||
|
if imp == importLine {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
td.Imports = append(td.Imports, importLine)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddEnum adds an enum to the template data
|
||||||
|
func (td *TemplateData) AddEnum(enum *EnumData) {
|
||||||
|
td.Enums = append(td.Enums, enum)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddTable adds a table to the template data
|
||||||
|
func (td *TemplateData) AddTable(table *TableData) {
|
||||||
|
td.Tables = append(td.Tables, table)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FinalizeImports sorts imports
|
||||||
|
func (td *TemplateData) FinalizeImports() {
|
||||||
|
sort.Strings(td.Imports)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEnumData creates EnumData from a models.Enum
|
||||||
|
func NewEnumData(enum *models.Enum, tm *TypeMapper) *EnumData {
|
||||||
|
// Keep enum name as-is (it should already be PascalCase from the source)
|
||||||
|
enumName := enum.Name
|
||||||
|
// Variable name is camelCase version
|
||||||
|
varName := tm.ToCamelCase(enum.Name)
|
||||||
|
|
||||||
|
// Format values as comma-separated quoted strings for pgEnum()
|
||||||
|
quotedValues := make([]string, len(enum.Values))
|
||||||
|
for i, v := range enum.Values {
|
||||||
|
quotedValues[i] = "'" + v + "'"
|
||||||
|
}
|
||||||
|
valuesStr := ""
|
||||||
|
for i, qv := range quotedValues {
|
||||||
|
if i > 0 {
|
||||||
|
valuesStr += ", "
|
||||||
|
}
|
||||||
|
valuesStr += qv
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build TypeScript union type (e.g., "'admin' | 'user' | 'guest'")
|
||||||
|
typeUnion := ""
|
||||||
|
for i, qv := range quotedValues {
|
||||||
|
if i > 0 {
|
||||||
|
typeUnion += " | "
|
||||||
|
}
|
||||||
|
typeUnion += qv
|
||||||
|
}
|
||||||
|
|
||||||
|
return &EnumData{
|
||||||
|
Name: enumName,
|
||||||
|
VarName: varName,
|
||||||
|
Values: enum.Values,
|
||||||
|
ValuesStr: valuesStr,
|
||||||
|
TypeUnion: typeUnion,
|
||||||
|
SchemaName: enum.Schema,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTableData creates TableData from a models.Table
|
||||||
|
func NewTableData(table *models.Table, tm *TypeMapper) *TableData {
|
||||||
|
tableName := tm.ToCamelCase(table.Name)
|
||||||
|
typeName := tm.ToPascalCase(table.Name)
|
||||||
|
|
||||||
|
return &TableData{
|
||||||
|
Name: tableName,
|
||||||
|
TableName: table.Name,
|
||||||
|
TypeName: typeName,
|
||||||
|
Columns: make([]*ColumnData, 0),
|
||||||
|
Indexes: make([]*IndexData, 0),
|
||||||
|
Comment: formatComment(table.Description, table.Comment),
|
||||||
|
SchemaName: table.Schema,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddColumn adds a column to the table data
|
||||||
|
func (td *TableData) AddColumn(col *ColumnData) {
|
||||||
|
td.Columns = append(td.Columns, col)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddIndex adds an index to the table data
|
||||||
|
func (td *TableData) AddIndex(idx *IndexData) {
|
||||||
|
td.Indexes = append(td.Indexes, idx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewColumnData creates ColumnData from a models.Column
|
||||||
|
func NewColumnData(col *models.Column, table *models.Table, tm *TypeMapper, isEnum bool) *ColumnData {
|
||||||
|
fieldName := tm.ToCamelCase(col.Name)
|
||||||
|
drizzleChain := tm.BuildColumnChain(col, table, isEnum)
|
||||||
|
|
||||||
|
return &ColumnData{
|
||||||
|
Name: col.Name,
|
||||||
|
FieldName: fieldName,
|
||||||
|
DrizzleChain: drizzleChain,
|
||||||
|
Comment: formatComment(col.Description, col.Comment),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIndexData creates IndexData from a models.Index
|
||||||
|
func NewIndexData(index *models.Index, tableVar string, tm *TypeMapper) *IndexData {
|
||||||
|
indexName := tm.ToCamelCase(index.Name) + "Idx"
|
||||||
|
|
||||||
|
// Build column references as field names (will be used with destructuring)
|
||||||
|
colRefs := make([]string, len(index.Columns))
|
||||||
|
for i, colName := range index.Columns {
|
||||||
|
// Use just the field name for destructured parameters
|
||||||
|
colRefs[i] = tm.ToCamelCase(colName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the complete definition
|
||||||
|
// Example: index('email_idx').on(email)
|
||||||
|
// or: uniqueIndex('unique_email_idx').on(email)
|
||||||
|
definition := ""
|
||||||
|
if index.Unique {
|
||||||
|
definition = "uniqueIndex('" + index.Name + "').on(" + joinStrings(colRefs, ", ") + ")"
|
||||||
|
} else {
|
||||||
|
definition = "index('" + index.Name + "').on(" + joinStrings(colRefs, ", ") + ")"
|
||||||
|
}
|
||||||
|
|
||||||
|
return &IndexData{
|
||||||
|
Name: indexName,
|
||||||
|
Columns: index.Columns,
|
||||||
|
IsUnique: index.Unique,
|
||||||
|
Definition: definition,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatComment combines description and comment into a single comment string
|
||||||
|
func formatComment(description, comment string) string {
|
||||||
|
if description != "" && comment != "" {
|
||||||
|
return description + " - " + comment
|
||||||
|
}
|
||||||
|
if description != "" {
|
||||||
|
return description
|
||||||
|
}
|
||||||
|
return comment
|
||||||
|
}
|
||||||
|
|
||||||
|
// joinStrings joins a slice of strings with a separator
|
||||||
|
func joinStrings(strs []string, sep string) string {
|
||||||
|
result := ""
|
||||||
|
for i, s := range strs {
|
||||||
|
if i > 0 {
|
||||||
|
result += sep
|
||||||
|
}
|
||||||
|
result += s
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
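For reference, a small fragment that mirrors the `ValuesStr`/`TypeUnion` formatting performed by `NewEnumData` above, for a hypothetical three-value enum. It uses `strings.Join` for brevity, so it illustrates the output shape rather than the implementation itself:

```go
values := []string{"admin", "user", "guest"} // hypothetical enum values
quoted := make([]string, len(values))
for i, v := range values {
	quoted[i] = "'" + v + "'"
}
fmt.Println(strings.Join(quoted, ", "))  // ValuesStr: 'admin', 'user', 'guest'
fmt.Println(strings.Join(quoted, " | ")) // TypeUnion: 'admin' | 'user' | 'guest'
```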
64 pkg/writers/drizzle/templates.go Normal file
@@ -0,0 +1,64 @@
package drizzle

import (
	"bytes"
	"text/template"
)

// schemaTemplate defines the template for generating Drizzle schemas
const schemaTemplate = `// Code generated by relspecgo. DO NOT EDIT.
{{range .Imports}}{{.}}
{{end}}
{{if .Enums}}
// Enums
{{range .Enums}}export const {{.VarName}} = pgEnum('{{.Name}}', [{{.ValuesStr}}]);
export type {{.Name}} = {{.TypeUnion}};
{{end}}
{{end}}
{{range .Tables}}// Table: {{.TableName}}{{if .Comment}} - {{.Comment}}{{end}}
export interface {{.TypeName}} {
{{- range $i, $col := .Columns}}
  {{$col.FieldName}}: {{$col.TypeScriptType}};{{if $col.Comment}} // {{$col.Comment}}{{end}}
{{- end}}
}

export const {{.Name}} = pgTable('{{.TableName}}', {
{{- range $i, $col := .Columns}}
  {{$col.FieldName}}: {{$col.DrizzleChain}},{{if $col.Comment}} // {{$col.Comment}}{{end}}
{{- end}}
}{{if .Indexes}}{{if .IndexColumnFields}}, ({ {{range $i, $field := .IndexColumnFields}}{{if $i}}, {{end}}{{$field}}{{end}} }) => [{{else}}, (table) => [{{end}}
{{- range $i, $idx := .Indexes}}
  {{$idx.Definition}},
{{- end}}
]{{end}});

export type New{{.TypeName}} = typeof {{.Name}}.$inferInsert;
{{end}}`

// Templates holds the parsed templates
type Templates struct {
	schemaTmpl *template.Template
}

// NewTemplates creates and parses the templates
func NewTemplates() (*Templates, error) {
	schemaTmpl, err := template.New("schema").Parse(schemaTemplate)
	if err != nil {
		return nil, err
	}

	return &Templates{
		schemaTmpl: schemaTmpl,
	}, nil
}

// GenerateCode executes the template with the given data
func (t *Templates) GenerateCode(data *TemplateData) (string, error) {
	var buf bytes.Buffer
	err := t.schemaTmpl.Execute(&buf, data)
	if err != nil {
		return "", err
	}

	return buf.String(), nil
}
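A brief sketch of how this template layer is driven in isolation (constructors as defined above; in the writer itself, `TemplateData` is populated from the database model via `NewTableData`/`NewColumnData` before rendering):

```go
tmpls, err := NewTemplates()
if err != nil {
	panic(err)
}

data := NewTemplateData()
data.AddImport("import { pgTable, integer } from 'drizzle-orm/pg-core';")
data.FinalizeImports()

code, err := tmpls.GenerateCode(data)
if err != nil {
	panic(err)
}
fmt.Println(code) // prints the generated header and imports; tables would follow if added
```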
318 pkg/writers/drizzle/type_mapper.go Normal file
@@ -0,0 +1,318 @@
|
|||||||
|
package drizzle
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TypeMapper handles SQL to Drizzle type conversions
|
||||||
|
type TypeMapper struct{}
|
||||||
|
|
||||||
|
// NewTypeMapper creates a new TypeMapper instance
|
||||||
|
func NewTypeMapper() *TypeMapper {
|
||||||
|
return &TypeMapper{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SQLTypeToDrizzle converts SQL types to Drizzle column type functions
|
||||||
|
// Returns the Drizzle column constructor (e.g., "integer", "varchar", "text")
|
||||||
|
func (tm *TypeMapper) SQLTypeToDrizzle(sqlType string) string {
|
||||||
|
sqlTypeLower := strings.ToLower(sqlType)
|
||||||
|
|
||||||
|
// PostgreSQL type mapping to Drizzle
|
||||||
|
typeMap := map[string]string{
|
||||||
|
// Integer types
|
||||||
|
"integer": "integer",
|
||||||
|
"int": "integer",
|
||||||
|
"int4": "integer",
|
||||||
|
"smallint": "smallint",
|
||||||
|
"int2": "smallint",
|
||||||
|
"bigint": "bigint",
|
||||||
|
"int8": "bigint",
|
||||||
|
|
||||||
|
// Serial types
|
||||||
|
"serial": "serial",
|
||||||
|
"serial4": "serial",
|
||||||
|
"smallserial": "smallserial",
|
||||||
|
"serial2": "smallserial",
|
||||||
|
"bigserial": "bigserial",
|
||||||
|
"serial8": "bigserial",
|
||||||
|
|
||||||
|
// Numeric types
|
||||||
|
"numeric": "numeric",
|
||||||
|
"decimal": "numeric",
|
||||||
|
"real": "real",
|
||||||
|
"float4": "real",
|
||||||
|
"double precision": "doublePrecision",
|
||||||
|
"float": "doublePrecision",
|
||||||
|
"float8": "doublePrecision",
|
||||||
|
|
||||||
|
// Character types
|
||||||
|
"text": "text",
|
||||||
|
"varchar": "varchar",
|
||||||
|
"character varying": "varchar",
|
||||||
|
"char": "char",
|
||||||
|
"character": "char",
|
||||||
|
|
||||||
|
// Boolean
|
||||||
|
"boolean": "boolean",
|
||||||
|
"bool": "boolean",
|
||||||
|
|
||||||
|
// Binary
|
||||||
|
"bytea": "bytea",
|
||||||
|
|
||||||
|
// JSON types
|
||||||
|
"json": "json",
|
||||||
|
"jsonb": "jsonb",
|
||||||
|
|
||||||
|
// Date/Time types
|
||||||
|
"time": "time",
|
||||||
|
"timetz": "time",
|
||||||
|
"timestamp": "timestamp",
|
||||||
|
"timestamptz": "timestamp",
|
||||||
|
"date": "date",
|
||||||
|
"interval": "interval",
|
||||||
|
|
||||||
|
// UUID
|
||||||
|
"uuid": "uuid",
|
||||||
|
|
||||||
|
// Geometric types
|
||||||
|
"point": "point",
|
||||||
|
"line": "line",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for exact match first
|
||||||
|
if drizzleType, ok := typeMap[sqlTypeLower]; ok {
|
||||||
|
return drizzleType
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for partial matches (e.g., "varchar(255)" -> "varchar")
|
||||||
|
for sqlPattern, drizzleType := range typeMap {
|
||||||
|
if strings.HasPrefix(sqlTypeLower, sqlPattern) {
|
||||||
|
return drizzleType
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default to text for unknown types
|
||||||
|
return "text"
|
||||||
|
}
|
||||||
|
|
||||||
|
// BuildColumnChain builds the complete column definition chain for Drizzle
|
||||||
|
// Example: integer('id').primaryKey().notNull()
|
||||||
|
func (tm *TypeMapper) BuildColumnChain(col *models.Column, table *models.Table, isEnum bool) string {
|
||||||
|
var parts []string
|
||||||
|
|
||||||
|
// Determine Drizzle column type
|
||||||
|
var drizzleType string
|
||||||
|
if isEnum {
|
||||||
|
// For enum types, use the type name directly
|
||||||
|
drizzleType = fmt.Sprintf("pgEnum('%s')", col.Type)
|
||||||
|
} else {
|
||||||
|
drizzleType = tm.SQLTypeToDrizzle(col.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start with column type and name
|
||||||
|
// Note: column name is passed as first argument to the column constructor
|
||||||
|
base := fmt.Sprintf("%s('%s')", drizzleType, col.Name)
|
||||||
|
parts = append(parts, base)
|
||||||
|
|
||||||
|
// Add column modifiers in order
|
||||||
|
modifiers := tm.buildColumnModifiers(col, table)
|
||||||
|
if len(modifiers) > 0 {
|
||||||
|
parts = append(parts, modifiers...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(parts, ".")
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildColumnModifiers builds an array of method calls for column modifiers
|
||||||
|
func (tm *TypeMapper) buildColumnModifiers(col *models.Column, table *models.Table) []string {
|
||||||
|
var modifiers []string
|
||||||
|
|
||||||
|
// Primary key
|
||||||
|
if col.IsPrimaryKey {
|
||||||
|
modifiers = append(modifiers, "primaryKey()")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not null constraint
|
||||||
|
if col.NotNull && !col.IsPrimaryKey {
|
||||||
|
modifiers = append(modifiers, "notNull()")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique constraint (check if there's a single-column unique constraint)
|
||||||
|
if tm.hasUniqueConstraint(col.Name, table) {
|
||||||
|
modifiers = append(modifiers, "unique()")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default value
|
||||||
|
if col.AutoIncrement {
|
||||||
|
// For auto-increment, use generatedAlwaysAsIdentity()
|
||||||
|
modifiers = append(modifiers, "generatedAlwaysAsIdentity()")
|
||||||
|
} else if col.Default != nil {
|
||||||
|
defaultValue := tm.formatDefaultValue(col.Default)
|
||||||
|
if defaultValue != "" {
|
||||||
|
modifiers = append(modifiers, fmt.Sprintf("default(%s)", defaultValue))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return modifiers
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatDefaultValue formats a default value for Drizzle
|
||||||
|
func (tm *TypeMapper) formatDefaultValue(defaultValue any) string {
|
||||||
|
switch v := defaultValue.(type) {
|
||||||
|
case string:
|
||||||
|
if v == "now()" || v == "CURRENT_TIMESTAMP" {
|
||||||
|
return "sql`now()`"
|
||||||
|
} else if v == "gen_random_uuid()" || strings.Contains(strings.ToLower(v), "uuid") {
|
||||||
|
return "sql`gen_random_uuid()`"
|
||||||
|
} else {
|
||||||
|
// Try to parse as number first
|
||||||
|
// Check if it's a numeric string that should be a number
|
||||||
|
if isNumericString(v) {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
// String literal
|
||||||
|
return fmt.Sprintf("'%s'", strings.ReplaceAll(v, "'", "\\'"))
|
||||||
|
}
|
||||||
|
case bool:
|
||||||
|
if v {
|
||||||
|
return "true"
|
||||||
|
}
|
||||||
|
return "false"
|
||||||
|
case int, int64, int32, int16, int8:
|
||||||
|
return fmt.Sprintf("%v", v)
|
||||||
|
case float32, float64:
|
||||||
|
return fmt.Sprintf("%v", v)
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("%v", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// isNumericString checks if a string represents a number
|
||||||
|
func isNumericString(s string) bool {
|
||||||
|
if s == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// Simple check for numeric strings
|
||||||
|
for i, c := range s {
|
||||||
|
if i == 0 && c == '-' {
|
||||||
|
continue // Allow negative sign at start
|
||||||
|
}
|
||||||
|
if c < '0' || c > '9' {
|
||||||
|
if c != '.' {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasUniqueConstraint checks if a column has a unique constraint
|
||||||
|
func (tm *TypeMapper) hasUniqueConstraint(colName string, table *models.Table) bool {
|
||||||
|
for _, constraint := range table.Constraints {
|
||||||
|
if constraint.Type == models.UniqueConstraint &&
|
||||||
|
len(constraint.Columns) == 1 &&
|
||||||
|
constraint.Columns[0] == colName {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// BuildReferencesChain builds the .references() chain for foreign key columns
|
||||||
|
func (tm *TypeMapper) BuildReferencesChain(fk *models.Constraint, referencedTable string) string {
|
||||||
|
// Example: .references(() => users.id)
|
||||||
|
if len(fk.ReferencedColumns) > 0 {
|
||||||
|
// Use the referenced table variable name (camelCase)
|
||||||
|
refTableVar := tm.ToCamelCase(referencedTable)
|
||||||
|
refColumn := fk.ReferencedColumns[0]
|
||||||
|
return fmt.Sprintf("references(() => %s.%s)", refTableVar, refColumn)
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToCamelCase converts snake_case or PascalCase to camelCase
|
||||||
|
func (tm *TypeMapper) ToCamelCase(s string) string {
|
||||||
|
if s == "" {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if it's snake_case
|
||||||
|
if strings.Contains(s, "_") {
|
||||||
|
parts := strings.Split(s, "_")
|
||||||
|
if len(parts) == 0 {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// First part stays lowercase
|
||||||
|
result := strings.ToLower(parts[0])
|
||||||
|
|
||||||
|
// Capitalize first letter of remaining parts
|
||||||
|
for i := 1; i < len(parts); i++ {
|
||||||
|
if len(parts[i]) > 0 {
|
||||||
|
result += strings.ToUpper(parts[i][:1]) + strings.ToLower(parts[i][1:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise, assume it's PascalCase - just lowercase the first letter
|
||||||
|
return strings.ToLower(s[:1]) + s[1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToPascalCase converts snake_case to PascalCase
|
||||||
|
func (tm *TypeMapper) ToPascalCase(s string) string {
|
||||||
|
parts := strings.Split(s, "_")
|
||||||
|
var result string
|
||||||
|
|
||||||
|
for _, part := range parts {
|
||||||
|
if len(part) > 0 {
|
||||||
|
result += strings.ToUpper(part[:1]) + strings.ToLower(part[1:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// DrizzleTypeToTypeScript converts Drizzle column types to TypeScript types
|
||||||
|
func (tm *TypeMapper) DrizzleTypeToTypeScript(drizzleType string, isEnum bool, enumName string) string {
|
||||||
|
if isEnum {
|
||||||
|
return enumName
|
||||||
|
}
|
||||||
|
|
||||||
|
typeMap := map[string]string{
|
||||||
|
"integer": "number",
|
||||||
|
"bigint": "number",
|
||||||
|
"smallint": "number",
|
||||||
|
"serial": "number",
|
||||||
|
"bigserial": "number",
|
||||||
|
"smallserial": "number",
|
||||||
|
"numeric": "number",
|
||||||
|
"real": "number",
|
||||||
|
"doublePrecision": "number",
|
||||||
|
"text": "string",
|
||||||
|
"varchar": "string",
|
||||||
|
"char": "string",
|
||||||
|
"boolean": "boolean",
|
||||||
|
"bytea": "Buffer",
|
||||||
|
"json": "any",
|
||||||
|
"jsonb": "any",
|
||||||
|
"timestamp": "Date",
|
||||||
|
"date": "Date",
|
||||||
|
"time": "Date",
|
||||||
|
"interval": "string",
|
||||||
|
"uuid": "string",
|
||||||
|
"point": "{ x: number; y: number }",
|
||||||
|
"line": "{ a: number; b: number; c: number }",
|
||||||
|
}
|
||||||
|
|
||||||
|
if tsType, ok := typeMap[drizzleType]; ok {
|
||||||
|
return tsType
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default to any for unknown types
|
||||||
|
return "any"
|
||||||
|
}
|
||||||
543
pkg/writers/drizzle/writer.go
Normal file
@@ -0,0 +1,543 @@
|
package drizzle
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Writer implements the writers.Writer interface for Drizzle ORM
|
||||||
|
type Writer struct {
|
||||||
|
options *writers.WriterOptions
|
||||||
|
typeMapper *TypeMapper
|
||||||
|
templates *Templates
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWriter creates a new Drizzle writer with the given options
|
||||||
|
func NewWriter(options *writers.WriterOptions) *Writer {
|
||||||
|
w := &Writer{
|
||||||
|
options: options,
|
||||||
|
typeMapper: NewTypeMapper(),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize templates
|
||||||
|
tmpl, err := NewTemplates()
|
||||||
|
if err != nil {
|
||||||
|
// Should not happen with embedded templates
|
||||||
|
panic(fmt.Sprintf("failed to initialize templates: %v", err))
|
||||||
|
}
|
||||||
|
w.templates = tmpl
|
||||||
|
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteDatabase writes a complete database as Drizzle schema
|
||||||
|
func (w *Writer) WriteDatabase(db *models.Database) error {
|
||||||
|
// Check if multi-file mode is enabled
|
||||||
|
multiFile := w.shouldUseMultiFile()
|
||||||
|
|
||||||
|
if multiFile {
|
||||||
|
return w.writeMultiFile(db)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.writeSingleFile(db)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteSchema writes a schema as Drizzle schema
|
||||||
|
func (w *Writer) WriteSchema(schema *models.Schema) error {
|
||||||
|
// Create a temporary database with just this schema
|
||||||
|
db := models.InitDatabase(schema.Name)
|
||||||
|
db.Schemas = []*models.Schema{schema}
|
||||||
|
|
||||||
|
return w.WriteDatabase(db)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteTable writes a single table as a Drizzle schema
|
||||||
|
func (w *Writer) WriteTable(table *models.Table) error {
|
||||||
|
// Create a temporary schema and database
|
||||||
|
schema := models.InitSchema(table.Schema)
|
||||||
|
schema.Tables = []*models.Table{table}
|
||||||
|
|
||||||
|
db := models.InitDatabase(schema.Name)
|
||||||
|
db.Schemas = []*models.Schema{schema}
|
||||||
|
|
||||||
|
return w.WriteDatabase(db)
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeSingleFile writes all tables to a single file
|
||||||
|
func (w *Writer) writeSingleFile(db *models.Database) error {
|
||||||
|
templateData := NewTemplateData()
|
||||||
|
|
||||||
|
// Build enum map for quick lookup
|
||||||
|
enumMap := w.buildEnumMap(db)
|
||||||
|
|
||||||
|
// Process all schemas
|
||||||
|
for _, schema := range db.Schemas {
|
||||||
|
// Add enums
|
||||||
|
for _, enum := range schema.Enums {
|
||||||
|
enumData := NewEnumData(enum, w.typeMapper)
|
||||||
|
templateData.AddEnum(enumData)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add tables
|
||||||
|
for _, table := range schema.Tables {
|
||||||
|
tableData := w.buildTableData(table, schema, db, enumMap)
|
||||||
|
templateData.AddTable(tableData)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add imports
|
||||||
|
w.addImports(templateData, db)
|
||||||
|
|
||||||
|
// Finalize imports
|
||||||
|
templateData.FinalizeImports()
|
||||||
|
|
||||||
|
// Generate code
|
||||||
|
code, err := w.templates.GenerateCode(templateData)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to generate code: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write output
|
||||||
|
return w.writeOutput(code)
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeMultiFile writes each table to a separate file
|
||||||
|
func (w *Writer) writeMultiFile(db *models.Database) error {
|
||||||
|
// Ensure output path is a directory
|
||||||
|
if w.options.OutputPath == "" {
|
||||||
|
return fmt.Errorf("output path is required for multi-file mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create output directory if it doesn't exist
|
||||||
|
if err := os.MkdirAll(w.options.OutputPath, 0755); err != nil {
|
||||||
|
return fmt.Errorf("failed to create output directory: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build enum map for quick lookup
|
||||||
|
enumMap := w.buildEnumMap(db)
|
||||||
|
|
||||||
|
// Process all schemas
|
||||||
|
for _, schema := range db.Schemas {
|
||||||
|
// Write enums file if there are any
|
||||||
|
if len(schema.Enums) > 0 {
|
||||||
|
if err := w.writeEnumsFile(schema); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write each table to a separate file
|
||||||
|
for _, table := range schema.Tables {
|
||||||
|
if err := w.writeTableFile(table, schema, db, enumMap); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeEnumsFile writes all enums to a separate file
|
||||||
|
func (w *Writer) writeEnumsFile(schema *models.Schema) error {
|
||||||
|
templateData := NewTemplateData()
|
||||||
|
|
||||||
|
// Add enums
|
||||||
|
for _, enum := range schema.Enums {
|
||||||
|
enumData := NewEnumData(enum, w.typeMapper)
|
||||||
|
templateData.AddEnum(enumData)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add imports for enums
|
||||||
|
templateData.AddImport("import { pgEnum } from 'drizzle-orm/pg-core';")
|
||||||
|
|
||||||
|
// Generate code
|
||||||
|
code, err := w.templates.GenerateCode(templateData)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to generate enums code: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write to enums.ts file
|
||||||
|
filename := filepath.Join(w.options.OutputPath, "enums.ts")
|
||||||
|
return os.WriteFile(filename, []byte(code), 0644)
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeTableFile writes a single table to its own file
|
||||||
|
func (w *Writer) writeTableFile(table *models.Table, schema *models.Schema, db *models.Database, enumMap map[string]bool) error {
|
||||||
|
templateData := NewTemplateData()
|
||||||
|
|
||||||
|
// Build table data
|
||||||
|
tableData := w.buildTableData(table, schema, db, enumMap)
|
||||||
|
templateData.AddTable(tableData)
|
||||||
|
|
||||||
|
// Add imports
|
||||||
|
w.addImports(templateData, db)
|
||||||
|
|
||||||
|
// If there are enums, add import from enums file
|
||||||
|
if len(schema.Enums) > 0 && w.tableUsesEnum(table, enumMap) {
|
||||||
|
// Import enum definitions from enums.ts
|
||||||
|
enumNames := w.getTableEnumNames(table, schema, enumMap)
|
||||||
|
if len(enumNames) > 0 {
|
||||||
|
importLine := fmt.Sprintf("import { %s } from './enums';", strings.Join(enumNames, ", "))
|
||||||
|
templateData.AddImport(importLine)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finalize imports
|
||||||
|
templateData.FinalizeImports()
|
||||||
|
|
||||||
|
// Generate code
|
||||||
|
code, err := w.templates.GenerateCode(templateData)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to generate code for table %s: %w", table.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate filename: {tableName}.ts
|
||||||
|
filename := filepath.Join(w.options.OutputPath, table.Name+".ts")
|
||||||
|
return os.WriteFile(filename, []byte(code), 0644)
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildTableData builds TableData from a models.Table
|
||||||
|
func (w *Writer) buildTableData(table *models.Table, schema *models.Schema, db *models.Database, enumMap map[string]bool) *TableData {
|
||||||
|
tableData := NewTableData(table, w.typeMapper)
|
||||||
|
|
||||||
|
// Add columns
|
||||||
|
for _, colName := range w.getSortedColumnNames(table) {
|
||||||
|
col := table.Columns[colName]
|
||||||
|
|
||||||
|
// Check if this column uses an enum
|
||||||
|
isEnum := enumMap[col.Type]
|
||||||
|
|
||||||
|
columnData := NewColumnData(col, table, w.typeMapper, isEnum)
|
||||||
|
|
||||||
|
// Set TypeScript type
|
||||||
|
drizzleType := w.typeMapper.SQLTypeToDrizzle(col.Type)
|
||||||
|
enumName := ""
|
||||||
|
if isEnum {
|
||||||
|
// For enums, use the enum type name
|
||||||
|
enumName = col.Type
|
||||||
|
}
|
||||||
|
baseType := w.typeMapper.DrizzleTypeToTypeScript(drizzleType, isEnum, enumName)
|
||||||
|
|
||||||
|
// Add null union if column is nullable
|
||||||
|
if !col.NotNull && !col.IsPrimaryKey {
|
||||||
|
columnData.TypeScriptType = baseType + " | null"
|
||||||
|
} else {
|
||||||
|
columnData.TypeScriptType = baseType
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if this column is a foreign key
|
||||||
|
if fk := w.getForeignKeyForColumn(col.Name, table); fk != nil {
|
||||||
|
columnData.IsForeignKey = true
|
||||||
|
refTableName := fk.ReferencedTable
|
||||||
|
refChain := w.typeMapper.BuildReferencesChain(fk, refTableName)
|
||||||
|
if refChain != "" {
|
||||||
|
columnData.ReferencesLine = "." + refChain
|
||||||
|
// Append to the drizzle chain
|
||||||
|
columnData.DrizzleChain += columnData.ReferencesLine
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tableData.AddColumn(columnData)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect all column field names that are used in indexes
|
||||||
|
indexColumnFields := make(map[string]bool)
|
||||||
|
|
||||||
|
// Add indexes (excluding single-column unique indexes, which are handled inline)
|
||||||
|
for _, index := range table.Indexes {
|
||||||
|
// Skip single-column unique indexes (handled by .unique() modifier)
|
||||||
|
if index.Unique && len(index.Columns) == 1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Track which columns are used in indexes
|
||||||
|
for _, colName := range index.Columns {
|
||||||
|
// Find the field name for this column
|
||||||
|
if col, exists := table.Columns[colName]; exists {
|
||||||
|
fieldName := w.typeMapper.ToCamelCase(col.Name)
|
||||||
|
indexColumnFields[fieldName] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
indexData := NewIndexData(index, tableData.Name, w.typeMapper)
|
||||||
|
tableData.AddIndex(indexData)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add multi-column unique constraints as unique indexes
|
||||||
|
for _, constraint := range table.Constraints {
|
||||||
|
if constraint.Type == models.UniqueConstraint && len(constraint.Columns) > 1 {
|
||||||
|
// Create a unique index for this constraint
|
||||||
|
indexData := &IndexData{
|
||||||
|
Name: w.typeMapper.ToCamelCase(constraint.Name) + "Idx",
|
||||||
|
Columns: constraint.Columns,
|
||||||
|
IsUnique: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Track which columns are used in indexes
|
||||||
|
for _, colName := range constraint.Columns {
|
||||||
|
if col, exists := table.Columns[colName]; exists {
|
||||||
|
fieldName := w.typeMapper.ToCamelCase(col.Name)
|
||||||
|
indexColumnFields[fieldName] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build column references as field names (for destructuring)
|
||||||
|
colRefs := make([]string, len(constraint.Columns))
|
||||||
|
for i, colName := range constraint.Columns {
|
||||||
|
if col, exists := table.Columns[colName]; exists {
|
||||||
|
colRefs[i] = w.typeMapper.ToCamelCase(col.Name)
|
||||||
|
} else {
|
||||||
|
colRefs[i] = w.typeMapper.ToCamelCase(colName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
indexData.Definition = "uniqueIndex('" + constraint.Name + "').on(" + joinStrings(colRefs, ", ") + ")"
|
||||||
|
tableData.AddIndex(indexData)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert index column fields map to sorted slice
|
||||||
|
if len(indexColumnFields) > 0 {
|
||||||
|
fields := make([]string, 0, len(indexColumnFields))
|
||||||
|
for field := range indexColumnFields {
|
||||||
|
fields = append(fields, field)
|
||||||
|
}
|
||||||
|
// Sort for consistent output
|
||||||
|
sortStrings(fields)
|
||||||
|
tableData.IndexColumnFields = fields
|
||||||
|
}
|
||||||
|
|
||||||
|
return tableData
|
||||||
|
}
|
||||||
|
|
||||||
|
// sortStrings sorts a slice of strings in place
|
||||||
|
func sortStrings(strs []string) {
|
||||||
|
for i := 0; i < len(strs); i++ {
|
||||||
|
for j := i + 1; j < len(strs); j++ {
|
||||||
|
if strs[i] > strs[j] {
|
||||||
|
strs[i], strs[j] = strs[j], strs[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// addImports adds the necessary imports to the template data
|
||||||
|
func (w *Writer) addImports(templateData *TemplateData, db *models.Database) {
|
||||||
|
// Determine which Drizzle imports we need
|
||||||
|
needsPgTable := len(templateData.Tables) > 0
|
||||||
|
needsPgEnum := len(templateData.Enums) > 0
|
||||||
|
needsIndex := false
|
||||||
|
needsUniqueIndex := false
|
||||||
|
needsSQL := false
|
||||||
|
|
||||||
|
// Check what we need based on tables
|
||||||
|
for _, table := range templateData.Tables {
|
||||||
|
for _, index := range table.Indexes {
|
||||||
|
if index.IsUnique {
|
||||||
|
needsUniqueIndex = true
|
||||||
|
} else {
|
||||||
|
needsIndex = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if any column uses SQL default values
|
||||||
|
for _, col := range table.Columns {
|
||||||
|
if strings.Contains(col.DrizzleChain, "sql`") {
|
||||||
|
needsSQL = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the import statement
|
||||||
|
imports := make([]string, 0)
|
||||||
|
|
||||||
|
if needsPgTable {
|
||||||
|
imports = append(imports, "pgTable")
|
||||||
|
}
|
||||||
|
if needsPgEnum {
|
||||||
|
imports = append(imports, "pgEnum")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add column types - for now, add common ones
|
||||||
|
// TODO: Could be optimized to only include used types
|
||||||
|
columnTypes := []string{
|
||||||
|
"integer", "bigint", "smallint",
|
||||||
|
"serial", "bigserial", "smallserial",
|
||||||
|
"text", "varchar", "char",
|
||||||
|
"boolean", "numeric", "real", "doublePrecision",
|
||||||
|
"timestamp", "date", "time", "interval",
|
||||||
|
"json", "jsonb", "uuid", "bytea",
|
||||||
|
}
|
||||||
|
imports = append(imports, columnTypes...)
|
||||||
|
|
||||||
|
if needsIndex {
|
||||||
|
imports = append(imports, "index")
|
||||||
|
}
|
||||||
|
if needsUniqueIndex {
|
||||||
|
imports = append(imports, "uniqueIndex")
|
||||||
|
}
|
||||||
|
|
||||||
|
importLine := "import { " + strings.Join(imports, ", ") + " } from 'drizzle-orm/pg-core';"
|
||||||
|
templateData.AddImport(importLine)
|
||||||
|
|
||||||
|
// Add SQL import if needed
|
||||||
|
if needsSQL {
|
||||||
|
templateData.AddImport("import { sql } from 'drizzle-orm';")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildEnumMap builds a map of enum type names for quick lookup
|
||||||
|
func (w *Writer) buildEnumMap(db *models.Database) map[string]bool {
|
||||||
|
enumMap := make(map[string]bool)
|
||||||
|
|
||||||
|
for _, schema := range db.Schemas {
|
||||||
|
for _, enum := range schema.Enums {
|
||||||
|
enumMap[enum.Name] = true
|
||||||
|
// Also add lowercase version for case-insensitive lookup
|
||||||
|
enumMap[strings.ToLower(enum.Name)] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return enumMap
|
||||||
|
}
|
||||||
|
|
||||||
|
// tableUsesEnum checks if a table uses any enum types
|
||||||
|
func (w *Writer) tableUsesEnum(table *models.Table, enumMap map[string]bool) bool {
|
||||||
|
for _, col := range table.Columns {
|
||||||
|
if enumMap[col.Type] || enumMap[strings.ToLower(col.Type)] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// getTableEnumNames returns the list of enum variable names used by a table
|
||||||
|
func (w *Writer) getTableEnumNames(table *models.Table, schema *models.Schema, enumMap map[string]bool) []string {
|
||||||
|
enumNames := make([]string, 0)
|
||||||
|
seen := make(map[string]bool)
|
||||||
|
|
||||||
|
for _, col := range table.Columns {
|
||||||
|
if enumMap[col.Type] || enumMap[strings.ToLower(col.Type)] {
|
||||||
|
// Find the enum in schema
|
||||||
|
for _, enum := range schema.Enums {
|
||||||
|
if strings.EqualFold(enum.Name, col.Type) {
|
||||||
|
varName := w.typeMapper.ToCamelCase(enum.Name)
|
||||||
|
if !seen[varName] {
|
||||||
|
enumNames = append(enumNames, varName)
|
||||||
|
seen[varName] = true
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return enumNames
|
||||||
|
}
|
||||||
|
|
||||||
|
// getSortedColumnNames returns column names sorted by sequence or name
|
||||||
|
func (w *Writer) getSortedColumnNames(table *models.Table) []string {
|
||||||
|
// Convert map to slice
|
||||||
|
columns := make([]*models.Column, 0, len(table.Columns))
|
||||||
|
for _, col := range table.Columns {
|
||||||
|
columns = append(columns, col)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort by sequence, then by primary key, then by name
|
||||||
|
// (Similar to GORM writer)
|
||||||
|
sortColumns := func(i, j int) bool {
|
||||||
|
// Sort by sequence if both have it
|
||||||
|
if columns[i].Sequence > 0 && columns[j].Sequence > 0 {
|
||||||
|
return columns[i].Sequence < columns[j].Sequence
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put primary keys first
|
||||||
|
if columns[i].IsPrimaryKey != columns[j].IsPrimaryKey {
|
||||||
|
return columns[i].IsPrimaryKey
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise sort alphabetically
|
||||||
|
return columns[i].Name < columns[j].Name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a custom sorter
|
||||||
|
for i := 0; i < len(columns); i++ {
|
||||||
|
for j := i + 1; j < len(columns); j++ {
|
||||||
|
if !sortColumns(i, j) {
|
||||||
|
columns[i], columns[j] = columns[j], columns[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract names
|
||||||
|
names := make([]string, len(columns))
|
||||||
|
for i, col := range columns {
|
||||||
|
names[i] = col.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
return names
|
||||||
|
}
|
||||||
|
|
||||||
|
// getForeignKeyForColumn returns the foreign key constraint for a column, if any
|
||||||
|
func (w *Writer) getForeignKeyForColumn(columnName string, table *models.Table) *models.Constraint {
|
||||||
|
for _, constraint := range table.Constraints {
|
||||||
|
if constraint.Type == models.ForeignKeyConstraint {
|
||||||
|
for _, col := range constraint.Columns {
|
||||||
|
if col == columnName {
|
||||||
|
return constraint
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeOutput writes the content to file or stdout
|
||||||
|
func (w *Writer) writeOutput(content string) error {
|
||||||
|
if w.options.OutputPath != "" {
|
||||||
|
return os.WriteFile(w.options.OutputPath, []byte(content), 0644)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print to stdout
|
||||||
|
fmt.Print(content)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// shouldUseMultiFile determines whether to use multi-file mode based on metadata or output path
|
||||||
|
func (w *Writer) shouldUseMultiFile() bool {
|
||||||
|
// Check if multi_file is explicitly set in metadata
|
||||||
|
if w.options.Metadata != nil {
|
||||||
|
if mf, ok := w.options.Metadata["multi_file"].(bool); ok {
|
||||||
|
return mf
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Auto-detect based on output path
|
||||||
|
if w.options.OutputPath == "" {
|
||||||
|
// No output path means stdout (single file)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if path ends with .ts (explicit file)
|
||||||
|
if strings.HasSuffix(w.options.OutputPath, ".ts") {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if path ends with directory separator
|
||||||
|
if strings.HasSuffix(w.options.OutputPath, "/") || strings.HasSuffix(w.options.OutputPath, "\\") {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if path exists and is a directory
|
||||||
|
info, err := os.Stat(w.options.OutputPath)
|
||||||
|
if err == nil && info.IsDir() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default to single file for ambiguous cases
|
||||||
|
return false
|
||||||
|
}
|
||||||
176
pkg/writers/gorm/README.md
Normal file
@@ -0,0 +1,176 @@
|
# GORM Writer
|
||||||
|
|
||||||
|
Generates Go source files with GORM model definitions from database schema information.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The GORM Writer converts RelSpec's internal database model representation into Go source code with GORM struct definitions, complete with proper tags, relationships, and methods.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Generates GORM-compatible Go structs
|
||||||
|
- Creates proper `gorm` struct tags
|
||||||
|
- Generates `TableName()` methods
|
||||||
|
- Adds relationship fields (belongs-to, has-many)
|
||||||
|
- Supports both single-file and multi-file output
|
||||||
|
- Auto-generates helper methods (optional)
|
||||||
|
- Maps SQL types to Go types
|
||||||
|
- Handles nullable fields with custom sql_types
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers/gorm"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Assume db is a *models.Database from a reader
|
||||||
|
options := &writers.WriterOptions{
|
||||||
|
OutputPath: "models.go",
|
||||||
|
PackageName: "models",
|
||||||
|
}
|
||||||
|
|
||||||
|
writer := gorm.NewWriter(options)
|
||||||
|
err := writer.WriteDatabase(db)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Examples
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate GORM models from PostgreSQL database (single file)
|
||||||
|
relspec --input pgsql \
|
||||||
|
--conn "postgres://localhost/mydb" \
|
||||||
|
--output gorm \
|
||||||
|
--out-file models.go \
|
||||||
|
--package models
|
||||||
|
|
||||||
|
# Generate GORM models with multi-file output (one file per table)
|
||||||
|
relspec --input json \
|
||||||
|
--in-file schema.json \
|
||||||
|
--output gorm \
|
||||||
|
--out-file models/ \
|
||||||
|
--package models
|
||||||
|
|
||||||
|
# Convert DBML to GORM models
|
||||||
|
relspec --input dbml --in-file schema.dbml --output gorm --out-file models.go
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output Modes
|
||||||
|
|
||||||
|
### Single File Mode
|
||||||
|
|
||||||
|
Generates all models in one file:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
relspec --input pgsql --conn "..." --output gorm --out-file models.go
|
||||||
|
```
|
||||||
|
|
||||||
|
### Multi-File Mode
|
||||||
|
|
||||||
|
Generates one file per table (auto-detected when output is a directory):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
relspec --input pgsql --conn "..." --output gorm --out-file models/
|
||||||
|
```
|
||||||
|
|
||||||
|
Files are named: `sql_{schema}_{table}.go`
|
||||||
|
|
||||||
|
## Generated Code Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
sql_types "git.warky.dev/wdevs/sql_types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ModelUser struct {
|
||||||
|
ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement" json:"id"`
|
||||||
|
Username string `gorm:"column:username;type:varchar(50);not null;uniqueIndex" json:"username"`
|
||||||
|
Email string `gorm:"column:email;type:varchar(100);not null" json:"email"`
|
||||||
|
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;not null;default:now()" json:"created_at"`
|
||||||
|
|
||||||
|
// Relationships
|
||||||
|
Pos []*ModelPost `gorm:"foreignKey:UserID;references:ID;constraint:OnDelete:CASCADE" json:"pos,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ModelUser) TableName() string {
|
||||||
|
return "public.users"
|
||||||
|
}
|
||||||
|
|
||||||
|
type ModelPost struct {
|
||||||
|
ID int64 `gorm:"column:id;type:bigint;primaryKey" json:"id"`
|
||||||
|
UserID int64 `gorm:"column:user_id;type:bigint;not null" json:"user_id"`
|
||||||
|
Title string `gorm:"column:title;type:varchar(200);not null" json:"title"`
|
||||||
|
Content sql_types.SqlString `gorm:"column:content;type:text" json:"content,omitempty"`
|
||||||
|
|
||||||
|
// Belongs to
|
||||||
|
Use *ModelUser `gorm:"foreignKey:UserID;references:ID" json:"use,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ModelPost) TableName() string {
|
||||||
|
return "public.posts"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Writer Options
|
||||||
|
|
||||||
|
### Metadata Options
|
||||||
|
|
||||||
|
Configure the writer behavior using metadata in `WriterOptions`:
|
||||||
|
|
||||||
|
```go
|
||||||
|
options := &writers.WriterOptions{
|
||||||
|
OutputPath: "models.go",
|
||||||
|
PackageName: "models",
|
||||||
|
Metadata: map[string]interface{}{
|
||||||
|
"multi_file": true, // Enable multi-file mode
|
||||||
|
"populate_refs": true, // Populate RefDatabase/RefSchema
|
||||||
|
"generate_get_id_str": true, // Generate GetIDStr() methods
|
||||||
|
},
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Type Mapping
|
||||||
|
|
||||||
|
| SQL Type | Go Type | Notes |
|
||||||
|
|----------|---------|-------|
|
||||||
|
| bigint, int8 | int64 | - |
|
||||||
|
| integer, int, int4 | int | - |
|
||||||
|
| smallint, int2 | int16 | - |
|
||||||
|
| varchar, text | string | Not nullable |
|
||||||
|
| varchar, text (nullable) | sql_types.SqlString | Nullable |
|
||||||
|
| boolean, bool | bool | - |
|
||||||
|
| timestamp, timestamptz | time.Time | - |
|
||||||
|
| numeric, decimal | float64 | - |
|
||||||
|
| uuid | string | - |
|
||||||
|
| json, jsonb | string | - |
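
A minimal sketch of how the nullable split above shows up in a generated struct (column and field names here are illustrative, not taken from a real schema):

```go
// email    varchar(100) NOT NULL -> plain string
// nickname varchar(50)  NULL     -> sql_types.SqlString wrapper
type ModelExample struct {
	Email    string              `gorm:"column:email;type:varchar(100);not null" json:"email"`
	Nickname sql_types.SqlString `gorm:"column:nickname;type:varchar(50)" json:"nickname,omitempty"`
}
```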
|
||||||
|
|
||||||
|
## Relationship Generation
|
||||||
|
|
||||||
|
The writer automatically generates relationship fields:
|
||||||
|
|
||||||
|
- **Belongs-to**: Generated for tables with foreign keys
|
||||||
|
- **Has-many**: Generated for tables referenced by foreign keys
|
||||||
|
- Relationship field names are derived from the related table name, shortened to a 3-letter prefix (e.g. `Pos` for posts, `Use` for user in the example above)
|
||||||
|
- Includes proper `gorm` tags with `foreignKey` and `references`
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- Model names are prefixed with "Model" (e.g., `ModelUser`)
|
||||||
|
- Nullable columns use `sql_types.SqlString`, `sql_types.SqlInt64`, etc.
|
||||||
|
- Generated code is auto-formatted with `go fmt`
|
||||||
|
- JSON tags are automatically added
|
||||||
|
- Supports schema-qualified table names in `TableName()` method
|
||||||
@@ -41,12 +41,7 @@ func NewWriter(options *writers.WriterOptions) *Writer {
// WriteDatabase writes a complete database as GORM models
func (w *Writer) WriteDatabase(db *models.Database) error {
	// Check if multi-file mode is enabled
-	multiFile := false
-	if w.options.Metadata != nil {
-		if mf, ok := w.options.Metadata["multi_file"].(bool); ok {
-			multiFile = mf
-		}
-	}
+	multiFile := w.shouldUseMultiFile()

	if multiFile {
		return w.writeMultiFile(db)
@@ -340,6 +335,41 @@ func (w *Writer) writeOutput(content string) error {
	return nil
}
|
||||||
|
|
||||||
|
// shouldUseMultiFile determines whether to use multi-file mode based on metadata or output path
|
||||||
|
func (w *Writer) shouldUseMultiFile() bool {
|
||||||
|
// Check if multi_file is explicitly set in metadata
|
||||||
|
if w.options.Metadata != nil {
|
||||||
|
if mf, ok := w.options.Metadata["multi_file"].(bool); ok {
|
||||||
|
return mf
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Auto-detect based on output path
|
||||||
|
if w.options.OutputPath == "" {
|
||||||
|
// No output path means stdout (single file)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if path ends with .go (explicit file)
|
||||||
|
if strings.HasSuffix(w.options.OutputPath, ".go") {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if path ends with directory separator
|
||||||
|
if strings.HasSuffix(w.options.OutputPath, "/") || strings.HasSuffix(w.options.OutputPath, "\\") {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if path exists and is a directory
|
||||||
|
info, err := os.Stat(w.options.OutputPath)
|
||||||
|
if err == nil && info.IsDir() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default to single file for ambiguous cases
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// createDatabaseRef creates a shallow copy of database without schemas to avoid circular references
func (w *Writer) createDatabaseRef(db *models.Database) *models.Database {
	return &models.Database{
|
||||||
|
|||||||
272
pkg/writers/graphql/README.md
Normal file
@@ -0,0 +1,272 @@
|
# GraphQL Schema Writer
|
||||||
|
|
||||||
|
The GraphQL writer converts RelSpec's internal database model into GraphQL Schema Definition Language (SDL) files.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- **Table to Type mapping**: Database tables become GraphQL types
|
||||||
|
- **Column to Field mapping**: Table columns become type fields
|
||||||
|
- **Enum support**: Database enums are preserved
|
||||||
|
- **Custom scalar declarations**: Automatically declares DateTime, JSON, Date scalars
|
||||||
|
- **Implicit relationships**: Generates relationship fields from foreign keys
|
||||||
|
- **Many-to-many support**: Handles junction tables intelligently
|
||||||
|
- **Clean output**: Proper formatting, field ordering, and comments
|
||||||
|
|
||||||
|
## Type Mappings
|
||||||
|
|
||||||
|
### SQL to GraphQL
|
||||||
|
|
||||||
|
| SQL Type | GraphQL Type | Notes |
|
||||||
|
|----------|--------------|-------|
|
||||||
|
| bigint, integer, serial (PK) | ID | Primary keys map to ID |
|
||||||
|
| bigint, integer, int | Int | |
|
||||||
|
| text, varchar, char | String | |
|
||||||
|
| uuid (PK) | ID | UUID primary keys also map to ID |
|
||||||
|
| uuid | String | Non-PK UUIDs map to String |
|
||||||
|
| double precision, numeric, float | Float | |
|
||||||
|
| boolean | Boolean | |
|
||||||
|
| timestamp, timestamptz | DateTime | Custom scalar |
|
||||||
|
| jsonb, json | JSON | Custom scalar |
|
||||||
|
| date | Date | Custom scalar |
|
||||||
|
| Enum types | Enum | Preserves enum name |
|
||||||
|
| Arrays (e.g., text[]) | [Type] | Mapped to GraphQL lists |
|
||||||
|
|
||||||
|
## Relationship Handling
|
||||||
|
|
||||||
|
The writer intelligently generates relationship fields based on foreign key constraints:
|
||||||
|
|
||||||
|
### Forward Relationships (FK on this table)
|
||||||
|
```sql
|
||||||
|
-- Post table has authorId FK to User.id
|
||||||
|
CREATE TABLE post (
|
||||||
|
id bigint PRIMARY KEY,
|
||||||
|
title text NOT NULL,
|
||||||
|
author_id bigint NOT NULL REFERENCES user(id)
|
||||||
|
);
|
||||||
|
```
|
||||||
|
|
||||||
|
```graphql
|
||||||
|
type Post {
|
||||||
|
id: ID!
|
||||||
|
title: String!
|
||||||
|
author: User! # Generated from authorId FK
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Reverse Relationships (FK on other table)
|
||||||
|
```graphql
|
||||||
|
type User {
|
||||||
|
id: ID!
|
||||||
|
email: String!
|
||||||
|
posts: [Post!]! # Reverse relationship (Post has FK to User)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Many-to-Many Relationships
|
||||||
|
|
||||||
|
Junction tables (tables with only PKs and FKs) are automatically detected and hidden:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE post_tag (
|
||||||
|
post_id bigint NOT NULL REFERENCES post(id),
|
||||||
|
tag_id bigint NOT NULL REFERENCES tag(id),
|
||||||
|
PRIMARY KEY (post_id, tag_id)
|
||||||
|
);
|
||||||
|
```
|
||||||
|
|
||||||
|
```graphql
|
||||||
|
type Post {
|
||||||
|
id: ID!
|
||||||
|
tags: [Tag!]! # Many-to-many through PostTag junction table
|
||||||
|
}
|
||||||
|
|
||||||
|
type Tag {
|
||||||
|
id: ID!
|
||||||
|
posts: [Post!]! # Reverse many-to-many
|
||||||
|
}
|
||||||
|
|
||||||
|
# Note: PostTag junction table is NOT included in output
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Usage
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers/graphql"
|
||||||
|
)
|
||||||
|
|
||||||
|
opts := &writers.WriterOptions{
|
||||||
|
OutputPath: "schema.graphql",
|
||||||
|
}
|
||||||
|
|
||||||
|
writer := graphql.NewWriter(opts)
|
||||||
|
err := writer.WriteDatabase(db)
|
||||||
|
```
|
||||||
|
|
||||||
|
### With Metadata Options
|
||||||
|
|
||||||
|
```go
|
||||||
|
opts := &writers.WriterOptions{
|
||||||
|
OutputPath: "schema.graphql",
|
||||||
|
Metadata: map[string]any{
|
||||||
|
"includeScalarDeclarations": true, // Include scalar declarations
|
||||||
|
"includeComments": true, // Include field/table comments
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
writer := graphql.NewWriter(opts)
|
||||||
|
err := writer.WriteDatabase(db)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Write to Stdout
|
||||||
|
|
||||||
|
```go
|
||||||
|
opts := &writers.WriterOptions{
|
||||||
|
OutputPath: "", // Empty path writes to stdout
|
||||||
|
}
|
||||||
|
|
||||||
|
writer := graphql.NewWriter(opts)
|
||||||
|
err := writer.WriteDatabase(db)
|
||||||
|
```
|
||||||
|
|
||||||
|
## CLI Usage
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Convert PostgreSQL database to GraphQL
|
||||||
|
relspec convert --from pgsql \
|
||||||
|
--from-conn "postgres://user:pass@localhost:5432/mydb" \
|
||||||
|
--to graphql --to-path schema.graphql
|
||||||
|
|
||||||
|
# Convert GORM models to GraphQL
|
||||||
|
relspec convert --from gorm --from-path ./models \
|
||||||
|
--to graphql --to-path schema.graphql
|
||||||
|
|
||||||
|
# Convert JSON to GraphQL
|
||||||
|
relspec convert --from json --from-path schema.json \
|
||||||
|
--to graphql --to-path schema.graphql
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output Format
|
||||||
|
|
||||||
|
The generated GraphQL schema follows this structure:
|
||||||
|
|
||||||
|
1. **Header comment** (if enabled)
|
||||||
|
2. **Custom scalar declarations** (if any custom scalars are used)
|
||||||
|
3. **Enum definitions** (alphabetically sorted)
|
||||||
|
4. **Type definitions** (with fields ordered: ID first, then scalars alphabetically, then relationships)
|
||||||
|
|
||||||
|
### Example Output
|
||||||
|
|
||||||
|
```graphql
|
||||||
|
# Generated GraphQL Schema
|
||||||
|
# Database: myapp
|
||||||
|
|
||||||
|
scalar DateTime
|
||||||
|
scalar JSON
|
||||||
|
scalar Date
|
||||||
|
|
||||||
|
enum Role {
|
||||||
|
ADMIN
|
||||||
|
USER
|
||||||
|
MODERATOR
|
||||||
|
}
|
||||||
|
|
||||||
|
type User {
|
||||||
|
id: ID!
|
||||||
|
createdAt: DateTime!
|
||||||
|
email: String!
|
||||||
|
name: String!
|
||||||
|
role: Role!
|
||||||
|
|
||||||
|
posts: [Post!]!
|
||||||
|
profile: Profile
|
||||||
|
}
|
||||||
|
|
||||||
|
type Post {
|
||||||
|
id: ID!
|
||||||
|
content: String
|
||||||
|
published: Boolean!
|
||||||
|
publishedAt: Date
|
||||||
|
title: String!
|
||||||
|
|
||||||
|
author: User!
|
||||||
|
tags: [Tag!]!
|
||||||
|
}
|
||||||
|
|
||||||
|
type Tag {
|
||||||
|
id: ID!
|
||||||
|
name: String!
|
||||||
|
|
||||||
|
posts: [Post!]!
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Metadata Options
|
||||||
|
|
||||||
|
| Option | Type | Description | Default |
|
||||||
|
|--------|------|-------------|---------|
|
||||||
|
| `includeScalarDeclarations` | bool | Include `scalar DateTime`, etc. declarations | true |
|
||||||
|
| `includeComments` | bool | Include table/field descriptions as comments | true |
|
||||||
|
| `preservePKType` | bool | Use Int/String for PKs instead of ID | false |
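
A minimal sketch of setting these options together (same `WriterOptions` shape as in the Usage section; the chosen values are illustrative):

```go
opts := &writers.WriterOptions{
	OutputPath: "schema.graphql",
	Metadata: map[string]any{
		"includeScalarDeclarations": true,  // emit scalar DateTime/JSON/Date declarations
		"includeComments":           false, // drop header and description comments
		"preservePKType":            true,  // keep integer PKs as Int and uuid/text PKs as String
	},
}

writer := graphql.NewWriter(opts)
err := writer.WriteDatabase(db)
```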
|
||||||
|
|
||||||
|
## Field Naming Conventions
|
||||||
|
|
||||||
|
- **FK columns**: Foreign key columns like `authorId` are removed from the output; instead, a relationship field `author` is generated
|
||||||
|
- **Relationship pluralization**: Reverse one-to-many relationships are pluralized (e.g., `posts`, `tags`)
|
||||||
|
- **Casing**: Scalar field names keep their original casing from the database; generated relationship field names are converted to camelCase
|
||||||
|
|
||||||
|
## Junction Table Detection
|
||||||
|
|
||||||
|
A table is considered a junction table if it:
|
||||||
|
1. Has exactly 2 foreign key constraints
|
||||||
|
2. All columns are either primary keys or foreign keys
|
||||||
|
3. Has a composite primary key on the FK columns
|
||||||
|
|
||||||
|
Junction tables are automatically hidden from the GraphQL output, and many-to-many relationship fields are generated on the related types instead.
|
||||||
|
|
||||||
|
## Limitations
|
||||||
|
|
||||||
|
- All tables in all schemas are flattened into a single GraphQL schema
|
||||||
|
- No support for GraphQL-specific features like directives, interfaces, or unions
|
||||||
|
- Nullable vs non-nullable is determined solely by the `NOT NULL` constraint
|
||||||
|
|
||||||
|
## Example Conversion
|
||||||
|
|
||||||
|
**Input** (Database Schema):
|
||||||
|
```sql
|
||||||
|
CREATE TABLE user (
|
||||||
|
id bigint PRIMARY KEY,
|
||||||
|
email text NOT NULL,
|
||||||
|
created_at timestamp NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TABLE post (
|
||||||
|
id bigint PRIMARY KEY,
|
||||||
|
title text NOT NULL,
|
||||||
|
author_id bigint NOT NULL REFERENCES user(id)
|
||||||
|
);
|
||||||
|
```
|
||||||
|
|
||||||
|
**Output** (GraphQL Schema):
|
||||||
|
```graphql
|
||||||
|
scalar DateTime
|
||||||
|
|
||||||
|
type User {
|
||||||
|
id: ID!
|
||||||
|
createdAt: DateTime!
|
||||||
|
email: String!
|
||||||
|
|
||||||
|
posts: [Post!]!
|
||||||
|
}
|
||||||
|
|
||||||
|
type Post {
|
||||||
|
id: ID!
|
||||||
|
title: String!
|
||||||
|
|
||||||
|
author: User!
|
||||||
|
}
|
||||||
|
```
|
||||||
178
pkg/writers/graphql/relationships.go
Normal file
@@ -0,0 +1,178 @@
|
package graphql
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (w *Writer) generateRelationFields(table *models.Table, db *models.Database, schema *models.Schema) []string {
|
||||||
|
var fields []string
|
||||||
|
|
||||||
|
// 1. Forward relationships (this table has FK)
|
||||||
|
for _, constraint := range table.Constraints {
|
||||||
|
if constraint.Type != models.ForeignKeyConstraint {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find the related table
|
||||||
|
relatedTable := w.findTable(db, constraint.ReferencedSchema, constraint.ReferencedTable)
|
||||||
|
if relatedTable == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate field name (remove "Id" suffix from FK column if present)
|
||||||
|
fieldName := w.relationFieldName(constraint.Columns[0])
|
||||||
|
|
||||||
|
// Determine nullability from FK column
|
||||||
|
nullable := true
|
||||||
|
for _, colName := range constraint.Columns {
|
||||||
|
if col, exists := table.Columns[colName]; exists {
|
||||||
|
if col.NotNull {
|
||||||
|
nullable = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Format: fieldName: RelatedType! or fieldName: RelatedType
|
||||||
|
gqlType := relatedTable.Name
|
||||||
|
if !nullable {
|
||||||
|
gqlType += "!"
|
||||||
|
}
|
||||||
|
|
||||||
|
fields = append(fields, fmt.Sprintf(" %s: %s", fieldName, gqlType))
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Reverse relationships (other tables reference this table)
|
||||||
|
for _, otherSchema := range db.Schemas {
|
||||||
|
for _, otherTable := range otherSchema.Tables {
|
||||||
|
if otherTable.Name == table.Name && otherSchema.Name == schema.Name {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip join tables for many-to-many
|
||||||
|
if w.isJoinTable(otherTable) {
|
||||||
|
// Check if this is a many-to-many through this join table
|
||||||
|
if m2mField := w.getManyToManyField(table, otherTable, db); m2mField != "" {
|
||||||
|
fields = append(fields, m2mField)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, constraint := range otherTable.Constraints {
|
||||||
|
if constraint.Type == models.ForeignKeyConstraint &&
|
||||||
|
constraint.ReferencedTable == table.Name &&
|
||||||
|
constraint.ReferencedSchema == schema.Name {
|
||||||
|
// Add reverse relationship field (array)
|
||||||
|
fieldName := w.pluralize(w.camelCase(otherTable.Name))
|
||||||
|
fields = append(fields, fmt.Sprintf(" %s: [%s!]!", fieldName, otherTable.Name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return fields
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) getManyToManyField(table *models.Table, joinTable *models.Table, db *models.Database) string {
|
||||||
|
// Find the two FK constraints in the join table
|
||||||
|
var fk1, fk2 *models.Constraint
|
||||||
|
for _, constraint := range joinTable.Constraints {
|
||||||
|
if constraint.Type == models.ForeignKeyConstraint {
|
||||||
|
if fk1 == nil {
|
||||||
|
fk1 = constraint
|
||||||
|
} else {
|
||||||
|
fk2 = constraint
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if fk1 == nil || fk2 == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine which FK points to our table and which to the other table
|
||||||
|
var targetConstraint *models.Constraint
|
||||||
|
if fk1.ReferencedTable == table.Name {
|
||||||
|
targetConstraint = fk2
|
||||||
|
} else if fk2.ReferencedTable == table.Name {
|
||||||
|
targetConstraint = fk1
|
||||||
|
} else {
|
||||||
|
return "" // This join table doesn't involve our table
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find the target table
|
||||||
|
targetTable := w.findTable(db, targetConstraint.ReferencedSchema, targetConstraint.ReferencedTable)
|
||||||
|
if targetTable == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate many-to-many field
|
||||||
|
fieldName := w.pluralize(w.camelCase(targetTable.Name))
|
||||||
|
return fmt.Sprintf(" %s: [%s!]!", fieldName, targetTable.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) findTable(db *models.Database, schemaName, tableName string) *models.Table {
|
||||||
|
for _, schema := range db.Schemas {
|
||||||
|
if schema.Name != schemaName {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, table := range schema.Tables {
|
||||||
|
if table.Name == tableName {
|
||||||
|
return table
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) relationFieldName(fkColumnName string) string {
|
||||||
|
// Remove "Id" or "_id" suffix
|
||||||
|
name := fkColumnName
|
||||||
|
if strings.HasSuffix(name, "Id") {
|
||||||
|
name = name[:len(name)-2]
|
||||||
|
} else if strings.HasSuffix(name, "_id") {
|
||||||
|
name = name[:len(name)-3]
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.camelCase(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) camelCase(s string) string {
|
||||||
|
// If already camelCase or PascalCase, convert to camelCase
|
||||||
|
if s == "" {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert first character to lowercase
|
||||||
|
return strings.ToLower(string(s[0])) + s[1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) pluralize(s string) string {
|
||||||
|
// Simple pluralization rules
|
||||||
|
if s == "" {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Already plural
|
||||||
|
if strings.HasSuffix(s, "s") {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Words ending in 'y' → 'ies'
|
||||||
|
if strings.HasSuffix(s, "y") {
|
||||||
|
return s[:len(s)-1] + "ies"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Words ending in 's', 'x', 'z', 'ch', 'sh' → add 'es'
|
||||||
|
if strings.HasSuffix(s, "s") || strings.HasSuffix(s, "x") ||
|
||||||
|
strings.HasSuffix(s, "z") || strings.HasSuffix(s, "ch") ||
|
||||||
|
strings.HasSuffix(s, "sh") {
|
||||||
|
return s + "es"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default: add 's'
|
||||||
|
return s + "s"
|
||||||
|
}
|
||||||
148
pkg/writers/graphql/type_mapping.go
Normal file
@@ -0,0 +1,148 @@
|
package graphql
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (w *Writer) sqlTypeToGraphQL(sqlType string, column *models.Column, table *models.Table, schema *models.Schema) string {
|
||||||
|
// Check if this is a primary key → ID type
|
||||||
|
if column.IsPrimaryKey {
|
||||||
|
// Check metadata for explicit type preference
|
||||||
|
if w.options.Metadata != nil {
|
||||||
|
if preserveType, ok := w.options.Metadata["preservePKType"].(bool); ok && preserveType {
|
||||||
|
// Use Int or String based on SQL type
|
||||||
|
if w.isIntegerType(sqlType) {
|
||||||
|
return "Int"
|
||||||
|
}
|
||||||
|
return "String"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "ID"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map SQL types to custom scalars
|
||||||
|
if scalar := w.sqlTypeToCustomScalar(sqlType); scalar != "" {
|
||||||
|
return scalar
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if it's an enum
|
||||||
|
if w.isEnumType(sqlType, schema) {
|
||||||
|
return sqlType
|
||||||
|
}
|
||||||
|
|
||||||
|
// Standard type mappings
|
||||||
|
baseType := strings.Split(sqlType, "(")[0] // Remove length/precision
|
||||||
|
baseType = strings.TrimSpace(baseType)
|
||||||
|
|
||||||
|
// Handle array types
|
||||||
|
if strings.HasSuffix(baseType, "[]") {
|
||||||
|
elemType := strings.TrimSuffix(baseType, "[]")
|
||||||
|
gqlType := w.mapBaseTypeToGraphQL(elemType)
|
||||||
|
return "[" + gqlType + "]"
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.mapBaseTypeToGraphQL(baseType)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) mapBaseTypeToGraphQL(baseType string) string {
|
||||||
|
typeMap := map[string]string{
|
||||||
|
// Text types
|
||||||
|
"text": "String",
|
||||||
|
"varchar": "String",
|
||||||
|
"char": "String",
|
||||||
|
"character": "String",
|
||||||
|
"bpchar": "String",
|
||||||
|
"name": "String",
|
||||||
|
|
||||||
|
// UUID
|
||||||
|
"uuid": "ID",
|
||||||
|
|
||||||
|
// Integer types
|
||||||
|
"integer": "Int",
|
||||||
|
"int": "Int",
|
||||||
|
"int2": "Int",
|
||||||
|
"int4": "Int",
|
||||||
|
"int8": "Int",
|
||||||
|
"bigint": "Int",
|
||||||
|
"smallint": "Int",
|
||||||
|
"serial": "Int",
|
||||||
|
"bigserial": "Int",
|
||||||
|
"smallserial": "Int",
|
||||||
|
|
||||||
|
// Float types
|
||||||
|
"double precision": "Float",
|
||||||
|
"float": "Float",
|
||||||
|
"float4": "Float",
|
||||||
|
"float8": "Float",
|
||||||
|
"real": "Float",
|
||||||
|
"numeric": "Float",
|
||||||
|
"decimal": "Float",
|
||||||
|
"money": "Float",
|
||||||
|
|
||||||
|
// Boolean
|
||||||
|
"boolean": "Boolean",
|
||||||
|
"bool": "Boolean",
|
||||||
|
}
|
||||||
|
|
||||||
|
if gqlType, ok := typeMap[baseType]; ok {
|
||||||
|
return gqlType
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default: capitalize first letter
|
||||||
|
if len(baseType) > 0 {
|
||||||
|
return strings.ToUpper(string(baseType[0])) + baseType[1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
return "String"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) sqlTypeToCustomScalar(sqlType string) string {
|
||||||
|
scalarMap := map[string]string{
|
||||||
|
"timestamp": "DateTime",
|
||||||
|
"timestamptz": "DateTime",
|
||||||
|
"timestamp with time zone": "DateTime",
|
||||||
|
"jsonb": "JSON",
|
||||||
|
"json": "JSON",
|
||||||
|
"date": "Date",
|
||||||
|
}
|
||||||
|
|
||||||
|
baseType := strings.Split(sqlType, "(")[0]
|
||||||
|
baseType = strings.TrimSpace(baseType)
|
||||||
|
|
||||||
|
if scalar, ok := scalarMap[baseType]; ok {
|
||||||
|
return scalar
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) isIntegerType(sqlType string) bool {
|
||||||
|
intTypes := map[string]bool{
|
||||||
|
"integer": true,
|
||||||
|
"int": true,
|
||||||
|
"int2": true,
|
||||||
|
"int4": true,
|
||||||
|
"int8": true,
|
||||||
|
"bigint": true,
|
||||||
|
"smallint": true,
|
||||||
|
"serial": true,
|
||||||
|
"bigserial": true,
|
||||||
|
"smallserial": true,
|
||||||
|
}
|
||||||
|
|
||||||
|
baseType := strings.Split(sqlType, "(")[0]
|
||||||
|
baseType = strings.TrimSpace(baseType)
|
||||||
|
|
||||||
|
return intTypes[baseType]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) isEnumType(sqlType string, schema *models.Schema) bool {
|
||||||
|
for _, enum := range schema.Enums {
|
||||||
|
if enum.Name == sqlType {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
272
pkg/writers/graphql/writer.go
Normal file
@@ -0,0 +1,272 @@
|
package graphql
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Writer struct {
|
||||||
|
options *writers.WriterOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewWriter(options *writers.WriterOptions) *Writer {
|
||||||
|
return &Writer{
|
||||||
|
options: options,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) WriteDatabase(db *models.Database) error {
|
||||||
|
content := w.databaseToGraphQL(db)
|
||||||
|
|
||||||
|
if w.options.OutputPath != "" {
|
||||||
|
return os.WriteFile(w.options.OutputPath, []byte(content), 0644)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Print(content)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) WriteSchema(schema *models.Schema) error {
|
||||||
|
db := models.InitDatabase(schema.Name)
|
||||||
|
db.Schemas = []*models.Schema{schema}
|
||||||
|
return w.WriteDatabase(db)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) WriteTable(table *models.Table) error {
|
||||||
|
schema := models.InitSchema(table.Schema)
|
||||||
|
schema.Tables = []*models.Table{table}
|
||||||
|
db := models.InitDatabase(schema.Name)
|
||||||
|
db.Schemas = []*models.Schema{schema}
|
||||||
|
return w.WriteDatabase(db)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) databaseToGraphQL(db *models.Database) string {
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
|
// Header comment
|
||||||
|
if w.shouldIncludeComments() {
|
||||||
|
sb.WriteString("# Generated GraphQL Schema\n")
|
||||||
|
if db.Name != "" {
|
||||||
|
sb.WriteString(fmt.Sprintf("# Database: %s\n", db.Name))
|
||||||
|
}
|
||||||
|
sb.WriteString("\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Custom scalar declarations
|
||||||
|
if w.shouldIncludeScalarDeclarations() {
|
||||||
|
scalars := w.collectCustomScalars(db)
|
||||||
|
if len(scalars) > 0 {
|
||||||
|
for _, scalar := range scalars {
|
||||||
|
sb.WriteString(fmt.Sprintf("scalar %s\n", scalar))
|
||||||
|
}
|
||||||
|
sb.WriteString("\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enum definitions
|
||||||
|
for _, schema := range db.Schemas {
|
||||||
|
for _, enum := range schema.Enums {
|
||||||
|
sb.WriteString(w.enumToGraphQL(enum))
|
||||||
|
sb.WriteString("\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type definitions
|
||||||
|
for _, schema := range db.Schemas {
|
||||||
|
for _, table := range schema.Tables {
|
||||||
|
// Skip join tables (tables with only PK+FK columns)
|
||||||
|
if w.isJoinTable(table) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.WriteString(w.tableToGraphQL(table, db, schema))
|
||||||
|
sb.WriteString("\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) shouldIncludeComments() bool {
|
||||||
|
if w.options.Metadata != nil {
|
||||||
|
if include, ok := w.options.Metadata["includeComments"].(bool); ok {
|
||||||
|
return include
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true // Default to true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) shouldIncludeScalarDeclarations() bool {
|
||||||
|
if w.options.Metadata != nil {
|
||||||
|
if include, ok := w.options.Metadata["includeScalarDeclarations"].(bool); ok {
|
||||||
|
return include
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true // Default to true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) collectCustomScalars(db *models.Database) []string {
|
||||||
|
scalarsNeeded := make(map[string]bool)
|
||||||
|
|
||||||
|
for _, schema := range db.Schemas {
|
||||||
|
for _, table := range schema.Tables {
|
||||||
|
for _, col := range table.Columns {
|
||||||
|
if scalar := w.sqlTypeToCustomScalar(col.Type); scalar != "" {
|
||||||
|
scalarsNeeded[scalar] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert to sorted slice
|
||||||
|
scalars := make([]string, 0, len(scalarsNeeded))
|
||||||
|
for scalar := range scalarsNeeded {
|
||||||
|
scalars = append(scalars, scalar)
|
||||||
|
}
|
||||||
|
sort.Strings(scalars)
|
||||||
|
|
||||||
|
return scalars
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) isJoinTable(table *models.Table) bool {
|
||||||
|
// A join table typically has:
|
||||||
|
// 1. Exactly 2 FK constraints
|
||||||
|
// 2. Composite primary key on those FK columns
|
||||||
|
// 3. No other columns
|
||||||
|
|
||||||
|
fkCount := 0
|
||||||
|
for _, constraint := range table.Constraints {
|
||||||
|
if constraint.Type == models.ForeignKeyConstraint {
|
||||||
|
fkCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if fkCount != 2 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if all columns are either PKs or FKs
|
||||||
|
for _, col := range table.Columns {
|
||||||
|
isFKColumn := false
|
||||||
|
for _, constraint := range table.Constraints {
|
||||||
|
if constraint.Type == models.ForeignKeyConstraint {
|
||||||
|
for _, fkCol := range constraint.Columns {
|
||||||
|
if fkCol == col.Name {
|
||||||
|
isFKColumn = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !isFKColumn && !col.IsPrimaryKey {
|
||||||
|
// Found a column that's neither PK nor FK
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) enumToGraphQL(enum *models.Enum) string {
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
|
sb.WriteString(fmt.Sprintf("enum %s {\n", enum.Name))
|
||||||
|
for _, value := range enum.Values {
|
||||||
|
sb.WriteString(fmt.Sprintf(" %s\n", value))
|
||||||
|
}
|
||||||
|
sb.WriteString("}\n")
|
||||||
|
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) tableToGraphQL(table *models.Table, db *models.Database, schema *models.Schema) string {
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
|
// Type name
|
||||||
|
typeName := table.Name
|
||||||
|
|
||||||
|
// Description comment
|
||||||
|
if w.shouldIncludeComments() && (table.Description != "" || table.Comment != "") {
|
||||||
|
desc := table.Description
|
||||||
|
if desc == "" {
|
||||||
|
desc = table.Comment
|
||||||
|
}
|
||||||
|
sb.WriteString(fmt.Sprintf("# %s\n", desc))
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.WriteString(fmt.Sprintf("type %s {\n", typeName))
|
||||||
|
|
||||||
|
// Collect and categorize fields
|
||||||
|
var idFields, scalarFields, relationFields []string
|
||||||
|
|
||||||
|
for _, column := range table.Columns {
|
||||||
|
// Skip FK columns (they become relation fields)
|
||||||
|
if w.isForeignKeyColumn(column, table) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
gqlType := w.sqlTypeToGraphQL(column.Type, column, table, schema)
|
||||||
|
if gqlType == "" {
|
||||||
|
continue // Skip if type couldn't be mapped
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine nullability
|
||||||
|
if column.NotNull {
|
||||||
|
gqlType += "!"
|
||||||
|
}
|
||||||
|
|
||||||
|
field := fmt.Sprintf(" %s: %s", column.Name, gqlType)
|
||||||
|
|
||||||
|
if column.IsPrimaryKey {
|
||||||
|
idFields = append(idFields, field)
|
||||||
|
} else {
|
||||||
|
scalarFields = append(scalarFields, field)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add relation fields
|
||||||
|
relationFields = w.generateRelationFields(table, db, schema)
|
||||||
|
|
||||||
|
// Write fields in order: ID, scalars (sorted), relations (sorted)
|
||||||
|
for _, field := range idFields {
|
||||||
|
sb.WriteString(field + "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Strings(scalarFields)
|
||||||
|
for _, field := range scalarFields {
|
||||||
|
sb.WriteString(field + "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(relationFields) > 0 {
|
||||||
|
if len(scalarFields) > 0 || len(idFields) > 0 {
|
||||||
|
sb.WriteString("\n") // Blank line before relations
|
||||||
|
}
|
||||||
|
sort.Strings(relationFields)
|
||||||
|
for _, field := range relationFields {
|
||||||
|
sb.WriteString(field + "\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.WriteString("}\n")
|
||||||
|
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) isForeignKeyColumn(column *models.Column, table *models.Table) bool {
|
||||||
|
for _, constraint := range table.Constraints {
|
||||||
|
if constraint.Type == models.ForeignKeyConstraint {
|
||||||
|
for _, fkCol := range constraint.Columns {
|
||||||
|
if fkCol == column.Name {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
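The two metadata switches above (`includeComments` and `includeScalarDeclarations`) are read straight from `WriterOptions.Metadata`. As a hedged sketch of how calling code might set them, assuming the GraphQL writer lives under `pkg/writers/graphql` and exposes the same `NewWriter`/`WriteDatabase` surface as the other writers in this changeset:

```go
// Hedged example: the metadata keys come from shouldIncludeComments and
// shouldIncludeScalarDeclarations above; WriteDatabase is assumed to match
// the other writers in this changeset.
opts := &writers.WriterOptions{
	OutputPath: "schema.graphql",
	Metadata: map[string]any{
		"includeComments":           true,  // keep table descriptions as "# ..." comments
		"includeScalarDeclarations": false, // omit the "scalar DateTime" etc. block
	},
}

w := graphql.NewWriter(opts)
if err := w.WriteDatabase(db); err != nil { // db is a *models.Database built elsewhere
	panic(err)
}
```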
412
pkg/writers/graphql/writer_test.go
Normal file
@@ -0,0 +1,412 @@
|
|||||||
|
package graphql
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWriter_WriteTable_Simple(t *testing.T) {
|
||||||
|
table := models.InitTable("User", "public")
|
||||||
|
|
||||||
|
idCol := models.InitColumn("id", "User", "public")
|
||||||
|
idCol.Type = "bigint"
|
||||||
|
idCol.IsPrimaryKey = true
|
||||||
|
idCol.NotNull = true
|
||||||
|
table.Columns["id"] = idCol
|
||||||
|
|
||||||
|
nameCol := models.InitColumn("name", "User", "public")
|
||||||
|
nameCol.Type = "text"
|
||||||
|
nameCol.NotNull = true
|
||||||
|
table.Columns["name"] = nameCol
|
||||||
|
|
||||||
|
emailCol := models.InitColumn("email", "User", "public")
|
||||||
|
emailCol.Type = "text"
|
||||||
|
emailCol.NotNull = false
|
||||||
|
table.Columns["email"] = emailCol
|
||||||
|
|
||||||
|
opts := &writers.WriterOptions{
|
||||||
|
OutputPath: "",
|
||||||
|
}
|
||||||
|
|
||||||
|
writer := NewWriter(opts)
|
||||||
|
schema := models.InitSchema("public")
|
||||||
|
schema.Tables = []*models.Table{table}
|
||||||
|
db := models.InitDatabase("test")
|
||||||
|
db.Schemas = []*models.Schema{schema}
|
||||||
|
|
||||||
|
output := writer.databaseToGraphQL(db)
|
||||||
|
|
||||||
|
// Verify output contains type definition
|
||||||
|
if !strings.Contains(output, "type User {") {
|
||||||
|
t.Error("Expected 'type User {' in output")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify fields
|
||||||
|
if !strings.Contains(output, "id: ID!") {
|
||||||
|
t.Error("Expected 'id: ID!' in output")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.Contains(output, "name: String!") {
|
||||||
|
t.Error("Expected 'name: String!' in output")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.Contains(output, "email: String") {
|
||||||
|
t.Error("Expected 'email: String' in output")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure email is not followed by ! (nullable)
|
||||||
|
if strings.Contains(output, "email: String!") {
|
||||||
|
t.Error("Did not expect 'email: String!' (should be nullable)")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriter_WriteDatabase_WithEnum(t *testing.T) {
|
||||||
|
schema := models.InitSchema("public")
|
||||||
|
|
||||||
|
// Create enum
|
||||||
|
roleEnum := &models.Enum{
|
||||||
|
Name: "Role",
|
||||||
|
Schema: "public",
|
||||||
|
Values: []string{"ADMIN", "USER", "GUEST"},
|
||||||
|
}
|
||||||
|
schema.Enums = []*models.Enum{roleEnum}
|
||||||
|
|
||||||
|
// Create table with enum field
|
||||||
|
table := models.InitTable("User", "public")
|
||||||
|
|
||||||
|
idCol := models.InitColumn("id", "User", "public")
|
||||||
|
idCol.Type = "bigint"
|
||||||
|
idCol.IsPrimaryKey = true
|
||||||
|
idCol.NotNull = true
|
||||||
|
table.Columns["id"] = idCol
|
||||||
|
|
||||||
|
roleCol := models.InitColumn("role", "User", "public")
|
||||||
|
roleCol.Type = "Role"
|
||||||
|
roleCol.NotNull = true
|
||||||
|
table.Columns["role"] = roleCol
|
||||||
|
|
||||||
|
schema.Tables = []*models.Table{table}
|
||||||
|
|
||||||
|
db := models.InitDatabase("test")
|
||||||
|
db.Schemas = []*models.Schema{schema}
|
||||||
|
|
||||||
|
opts := &writers.WriterOptions{}
|
||||||
|
writer := NewWriter(opts)
|
||||||
|
|
||||||
|
output := writer.databaseToGraphQL(db)
|
||||||
|
|
||||||
|
// Verify enum definition
|
||||||
|
if !strings.Contains(output, "enum Role {") {
|
||||||
|
t.Error("Expected 'enum Role {' in output")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.Contains(output, "ADMIN") {
|
||||||
|
t.Error("Expected 'ADMIN' enum value in output")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify enum usage in type
|
||||||
|
if !strings.Contains(output, "role: Role!") {
|
||||||
|
t.Error("Expected 'role: Role!' in output")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriter_WriteDatabase_WithRelations(t *testing.T) {
|
||||||
|
schema := models.InitSchema("public")
|
||||||
|
|
||||||
|
// Create User table
|
||||||
|
userTable := models.InitTable("User", "public")
|
||||||
|
userIdCol := models.InitColumn("id", "User", "public")
|
||||||
|
userIdCol.Type = "bigint"
|
||||||
|
userIdCol.IsPrimaryKey = true
|
||||||
|
userIdCol.NotNull = true
|
||||||
|
userTable.Columns["id"] = userIdCol
|
||||||
|
|
||||||
|
userNameCol := models.InitColumn("name", "User", "public")
|
||||||
|
userNameCol.Type = "text"
|
||||||
|
userNameCol.NotNull = true
|
||||||
|
userTable.Columns["name"] = userNameCol
|
||||||
|
|
||||||
|
// Create Post table with FK to User
|
||||||
|
postTable := models.InitTable("Post", "public")
|
||||||
|
|
||||||
|
postIdCol := models.InitColumn("id", "Post", "public")
|
||||||
|
postIdCol.Type = "bigint"
|
||||||
|
postIdCol.IsPrimaryKey = true
|
||||||
|
postIdCol.NotNull = true
|
||||||
|
postTable.Columns["id"] = postIdCol
|
||||||
|
|
||||||
|
titleCol := models.InitColumn("title", "Post", "public")
|
||||||
|
titleCol.Type = "text"
|
||||||
|
titleCol.NotNull = true
|
||||||
|
postTable.Columns["title"] = titleCol
|
||||||
|
|
||||||
|
authorIdCol := models.InitColumn("authorId", "Post", "public")
|
||||||
|
authorIdCol.Type = "bigint"
|
||||||
|
authorIdCol.NotNull = true
|
||||||
|
postTable.Columns["authorId"] = authorIdCol
|
||||||
|
|
||||||
|
// Add FK constraint
|
||||||
|
fkConstraint := models.InitConstraint("fk_post_author", models.ForeignKeyConstraint)
|
||||||
|
fkConstraint.Schema = "public"
|
||||||
|
fkConstraint.Table = "Post"
|
||||||
|
fkConstraint.Columns = []string{"authorId"}
|
||||||
|
fkConstraint.ReferencedSchema = "public"
|
||||||
|
fkConstraint.ReferencedTable = "User"
|
||||||
|
fkConstraint.ReferencedColumns = []string{"id"}
|
||||||
|
postTable.Constraints["fk_post_author"] = fkConstraint
|
||||||
|
|
||||||
|
schema.Tables = []*models.Table{userTable, postTable}
|
||||||
|
|
||||||
|
db := models.InitDatabase("test")
|
||||||
|
db.Schemas = []*models.Schema{schema}
|
||||||
|
|
||||||
|
opts := &writers.WriterOptions{}
|
||||||
|
writer := NewWriter(opts)
|
||||||
|
|
||||||
|
output := writer.databaseToGraphQL(db)
|
||||||
|
|
||||||
|
// Verify Post has author field (forward relationship)
|
||||||
|
if !strings.Contains(output, "author: User!") {
|
||||||
|
t.Error("Expected 'author: User!' in Post type")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify authorId FK column is NOT in the output
|
||||||
|
if strings.Contains(output, "authorId:") {
|
||||||
|
t.Error("Did not expect 'authorId:' field in output (FK columns should be hidden)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify User has posts field (reverse relationship)
|
||||||
|
if !strings.Contains(output, "posts: [Post!]!") {
|
||||||
|
t.Error("Expected 'posts: [Post!]!' in User type")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriter_WriteDatabase_CustomScalars(t *testing.T) {
|
||||||
|
schema := models.InitSchema("public")
|
||||||
|
|
||||||
|
table := models.InitTable("Event", "public")
|
||||||
|
|
||||||
|
idCol := models.InitColumn("id", "Event", "public")
|
||||||
|
idCol.Type = "bigint"
|
||||||
|
idCol.IsPrimaryKey = true
|
||||||
|
idCol.NotNull = true
|
||||||
|
table.Columns["id"] = idCol
|
||||||
|
|
||||||
|
createdAtCol := models.InitColumn("createdAt", "Event", "public")
|
||||||
|
createdAtCol.Type = "timestamp"
|
||||||
|
createdAtCol.NotNull = true
|
||||||
|
table.Columns["createdAt"] = createdAtCol
|
||||||
|
|
||||||
|
metadataCol := models.InitColumn("metadata", "Event", "public")
|
||||||
|
metadataCol.Type = "jsonb"
|
||||||
|
metadataCol.NotNull = false
|
||||||
|
table.Columns["metadata"] = metadataCol
|
||||||
|
|
||||||
|
dateCol := models.InitColumn("eventDate", "Event", "public")
|
||||||
|
dateCol.Type = "date"
|
||||||
|
dateCol.NotNull = false
|
||||||
|
table.Columns["eventDate"] = dateCol
|
||||||
|
|
||||||
|
schema.Tables = []*models.Table{table}
|
||||||
|
|
||||||
|
db := models.InitDatabase("test")
|
||||||
|
db.Schemas = []*models.Schema{schema}
|
||||||
|
|
||||||
|
opts := &writers.WriterOptions{}
|
||||||
|
writer := NewWriter(opts)
|
||||||
|
|
||||||
|
output := writer.databaseToGraphQL(db)
|
||||||
|
|
||||||
|
// Verify scalar declarations
|
||||||
|
if !strings.Contains(output, "scalar DateTime") {
|
||||||
|
t.Error("Expected 'scalar DateTime' declaration")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.Contains(output, "scalar JSON") {
|
||||||
|
t.Error("Expected 'scalar JSON' declaration")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.Contains(output, "scalar Date") {
|
||||||
|
t.Error("Expected 'scalar Date' declaration")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify field types
|
||||||
|
if !strings.Contains(output, "createdAt: DateTime!") {
|
||||||
|
t.Error("Expected 'createdAt: DateTime!' in output")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.Contains(output, "metadata: JSON") {
|
||||||
|
t.Error("Expected 'metadata: JSON' in output")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.Contains(output, "eventDate: Date") {
|
||||||
|
t.Error("Expected 'eventDate: Date' in output")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriter_WriteDatabase_ManyToMany(t *testing.T) {
|
||||||
|
schema := models.InitSchema("public")
|
||||||
|
|
||||||
|
// Create Post table
|
||||||
|
postTable := models.InitTable("Post", "public")
|
||||||
|
postIdCol := models.InitColumn("id", "Post", "public")
|
||||||
|
postIdCol.Type = "bigint"
|
||||||
|
postIdCol.IsPrimaryKey = true
|
||||||
|
postIdCol.NotNull = true
|
||||||
|
postTable.Columns["id"] = postIdCol
|
||||||
|
|
||||||
|
titleCol := models.InitColumn("title", "Post", "public")
|
||||||
|
titleCol.Type = "text"
|
||||||
|
titleCol.NotNull = true
|
||||||
|
postTable.Columns["title"] = titleCol
|
||||||
|
|
||||||
|
// Create Tag table
|
||||||
|
tagTable := models.InitTable("Tag", "public")
|
||||||
|
tagIdCol := models.InitColumn("id", "Tag", "public")
|
||||||
|
tagIdCol.Type = "bigint"
|
||||||
|
tagIdCol.IsPrimaryKey = true
|
||||||
|
tagIdCol.NotNull = true
|
||||||
|
tagTable.Columns["id"] = tagIdCol
|
||||||
|
|
||||||
|
nameCol := models.InitColumn("name", "Tag", "public")
|
||||||
|
nameCol.Type = "text"
|
||||||
|
nameCol.NotNull = true
|
||||||
|
tagTable.Columns["name"] = nameCol
|
||||||
|
|
||||||
|
// Create PostTag join table
|
||||||
|
joinTable := models.InitTable("PostTag", "public")
|
||||||
|
|
||||||
|
postIdJoinCol := models.InitColumn("postId", "PostTag", "public")
|
||||||
|
postIdJoinCol.Type = "bigint"
|
||||||
|
postIdJoinCol.NotNull = true
|
||||||
|
postIdJoinCol.IsPrimaryKey = true
|
||||||
|
joinTable.Columns["postId"] = postIdJoinCol
|
||||||
|
|
||||||
|
tagIdJoinCol := models.InitColumn("tagId", "PostTag", "public")
|
||||||
|
tagIdJoinCol.Type = "bigint"
|
||||||
|
tagIdJoinCol.NotNull = true
|
||||||
|
tagIdJoinCol.IsPrimaryKey = true
|
||||||
|
joinTable.Columns["tagId"] = tagIdJoinCol
|
||||||
|
|
||||||
|
// Add composite PK constraint
|
||||||
|
pkConstraint := models.InitConstraint("pk_posttag", models.PrimaryKeyConstraint)
|
||||||
|
pkConstraint.Schema = "public"
|
||||||
|
pkConstraint.Table = "PostTag"
|
||||||
|
pkConstraint.Columns = []string{"postId", "tagId"}
|
||||||
|
joinTable.Constraints["pk_posttag"] = pkConstraint
|
||||||
|
|
||||||
|
// Add FK to Post
|
||||||
|
fk1 := models.InitConstraint("fk_posttag_post", models.ForeignKeyConstraint)
|
||||||
|
fk1.Schema = "public"
|
||||||
|
fk1.Table = "PostTag"
|
||||||
|
fk1.Columns = []string{"postId"}
|
||||||
|
fk1.ReferencedSchema = "public"
|
||||||
|
fk1.ReferencedTable = "Post"
|
||||||
|
fk1.ReferencedColumns = []string{"id"}
|
||||||
|
joinTable.Constraints["fk_posttag_post"] = fk1
|
||||||
|
|
||||||
|
// Add FK to Tag
|
||||||
|
fk2 := models.InitConstraint("fk_posttag_tag", models.ForeignKeyConstraint)
|
||||||
|
fk2.Schema = "public"
|
||||||
|
fk2.Table = "PostTag"
|
||||||
|
fk2.Columns = []string{"tagId"}
|
||||||
|
fk2.ReferencedSchema = "public"
|
||||||
|
fk2.ReferencedTable = "Tag"
|
||||||
|
fk2.ReferencedColumns = []string{"id"}
|
||||||
|
joinTable.Constraints["fk_posttag_tag"] = fk2
|
||||||
|
|
||||||
|
schema.Tables = []*models.Table{postTable, tagTable, joinTable}
|
||||||
|
|
||||||
|
db := models.InitDatabase("test")
|
||||||
|
db.Schemas = []*models.Schema{schema}
|
||||||
|
|
||||||
|
opts := &writers.WriterOptions{}
|
||||||
|
writer := NewWriter(opts)
|
||||||
|
|
||||||
|
output := writer.databaseToGraphQL(db)
|
||||||
|
|
||||||
|
// Verify join table is NOT in output
|
||||||
|
if strings.Contains(output, "type PostTag") {
|
||||||
|
t.Error("Did not expect 'type PostTag' (join tables should be hidden)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify Post has tags field
|
||||||
|
if !strings.Contains(output, "tags: [Tag!]!") {
|
||||||
|
t.Error("Expected 'tags: [Tag!]!' in Post type")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify Tag has posts field
|
||||||
|
if !strings.Contains(output, "posts: [Post!]!") {
|
||||||
|
t.Error("Expected 'posts: [Post!]!' in Tag type")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriter_WriteDatabase_UUIDType(t *testing.T) {
|
||||||
|
schema := models.InitSchema("public")
|
||||||
|
|
||||||
|
table := models.InitTable("User", "public")
|
||||||
|
|
||||||
|
idCol := models.InitColumn("id", "User", "public")
|
||||||
|
idCol.Type = "uuid"
|
||||||
|
idCol.IsPrimaryKey = true
|
||||||
|
idCol.NotNull = true
|
||||||
|
table.Columns["id"] = idCol
|
||||||
|
|
||||||
|
schema.Tables = []*models.Table{table}
|
||||||
|
|
||||||
|
db := models.InitDatabase("test")
|
||||||
|
db.Schemas = []*models.Schema{schema}
|
||||||
|
|
||||||
|
opts := &writers.WriterOptions{}
|
||||||
|
writer := NewWriter(opts)
|
||||||
|
|
||||||
|
output := writer.databaseToGraphQL(db)
|
||||||
|
|
||||||
|
// UUID primary keys should still map to ID
|
||||||
|
if !strings.Contains(output, "id: ID!") {
|
||||||
|
t.Error("Expected 'id: ID!' for UUID primary key")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriter_Metadata_NoScalarDeclarations(t *testing.T) {
|
||||||
|
schema := models.InitSchema("public")
|
||||||
|
|
||||||
|
table := models.InitTable("Event", "public")
|
||||||
|
|
||||||
|
idCol := models.InitColumn("id", "Event", "public")
|
||||||
|
idCol.Type = "bigint"
|
||||||
|
idCol.IsPrimaryKey = true
|
||||||
|
table.Columns["id"] = idCol
|
||||||
|
|
||||||
|
createdAtCol := models.InitColumn("createdAt", "Event", "public")
|
||||||
|
createdAtCol.Type = "timestamp"
|
||||||
|
createdAtCol.NotNull = true
|
||||||
|
table.Columns["createdAt"] = createdAtCol
|
||||||
|
|
||||||
|
schema.Tables = []*models.Table{table}
|
||||||
|
|
||||||
|
db := models.InitDatabase("test")
|
||||||
|
db.Schemas = []*models.Schema{schema}
|
||||||
|
|
||||||
|
opts := &writers.WriterOptions{
|
||||||
|
Metadata: map[string]any{
|
||||||
|
"includeScalarDeclarations": false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
writer := NewWriter(opts)
|
||||||
|
|
||||||
|
output := writer.databaseToGraphQL(db)
|
||||||
|
|
||||||
|
// Verify no scalar declarations
|
||||||
|
if strings.Contains(output, "scalar DateTime") {
|
||||||
|
t.Error("Did not expect 'scalar DateTime' with includeScalarDeclarations=false")
|
||||||
|
}
|
||||||
|
|
||||||
|
// But field should still use DateTime
|
||||||
|
if !strings.Contains(output, "createdAt: DateTime!") {
|
||||||
|
t.Error("Expected 'createdAt: DateTime!' in output")
|
||||||
|
}
|
||||||
|
}
|
||||||
277
pkg/writers/json/README.md
Normal file
@@ -0,0 +1,277 @@
|
|||||||
|
# JSON Writer
|
||||||
|
|
||||||
|
Generates database schema definitions in JSON format.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The JSON Writer converts RelSpec's internal database model representation into JSON format, providing a complete, structured representation of the database schema.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Generates RelSpec's canonical JSON schema format
|
||||||
|
- Complete schema representation including:
|
||||||
|
- Databases and schemas
|
||||||
|
- Tables, columns, and data types
|
||||||
|
- Constraints (PK, FK, unique, check)
|
||||||
|
- Indexes
|
||||||
|
- Relationships
|
||||||
|
- Views and sequences
|
||||||
|
- Pretty-printed, human-readable output
|
||||||
|
- Suitable for version control
|
||||||
|
- Ideal interchange format
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers/json"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
options := &writers.WriterOptions{
|
||||||
|
OutputPath: "schema.json",
|
||||||
|
}
|
||||||
|
|
||||||
|
writer := json.NewWriter(options)
|
||||||
|
err := writer.WriteDatabase(db)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Examples
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Export PostgreSQL database to JSON
|
||||||
|
relspec --input pgsql \
|
||||||
|
--conn "postgres://localhost/mydb" \
|
||||||
|
--output json \
|
||||||
|
--out-file schema.json
|
||||||
|
|
||||||
|
# Convert GORM models to JSON
|
||||||
|
relspec --input gorm --in-file models.go --output json --out-file schema.json
|
||||||
|
|
||||||
|
# Convert DBML to JSON
|
||||||
|
relspec --input dbml --in-file diagram.dbml --output json --out-file schema.json
|
||||||
|
```
|
||||||
|
|
||||||
|
## Generated JSON Example
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "myapp",
|
||||||
|
"description": "",
|
||||||
|
"database_type": "postgresql",
|
||||||
|
"database_version": "",
|
||||||
|
"source_format": "pgsql",
|
||||||
|
"schemas": [
|
||||||
|
{
|
||||||
|
"name": "public",
|
||||||
|
"description": "",
|
||||||
|
"tables": [
|
||||||
|
{
|
||||||
|
"name": "users",
|
||||||
|
"schema": "public",
|
||||||
|
"description": "",
|
||||||
|
"columns": {
|
||||||
|
"id": {
|
||||||
|
"name": "id",
|
||||||
|
"table": "users",
|
||||||
|
"schema": "public",
|
||||||
|
"type": "bigint",
|
||||||
|
"length": 0,
|
||||||
|
"precision": 0,
|
||||||
|
"scale": 0,
|
||||||
|
"not_null": true,
|
||||||
|
"is_primary_key": true,
|
||||||
|
"auto_increment": true,
|
||||||
|
"default": "",
|
||||||
|
"sequence": 1
|
||||||
|
},
|
||||||
|
"username": {
|
||||||
|
"name": "username",
|
||||||
|
"table": "users",
|
||||||
|
"schema": "public",
|
||||||
|
"type": "varchar",
|
||||||
|
"length": 50,
|
||||||
|
"not_null": true,
|
||||||
|
"is_primary_key": false,
|
||||||
|
"auto_increment": false,
|
||||||
|
"sequence": 2
|
||||||
|
},
|
||||||
|
"email": {
|
||||||
|
"name": "email",
|
||||||
|
"table": "users",
|
||||||
|
"schema": "public",
|
||||||
|
"type": "varchar",
|
||||||
|
"length": 100,
|
||||||
|
"not_null": true,
|
||||||
|
"sequence": 3
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"constraints": {
|
||||||
|
"pk_users": {
|
||||||
|
"name": "pk_users",
|
||||||
|
"type": "PRIMARY KEY",
|
||||||
|
"table": "users",
|
||||||
|
"schema": "public",
|
||||||
|
"columns": ["id"]
|
||||||
|
},
|
||||||
|
"uq_users_username": {
|
||||||
|
"name": "uq_users_username",
|
||||||
|
"type": "UNIQUE",
|
||||||
|
"table": "users",
|
||||||
|
"schema": "public",
|
||||||
|
"columns": ["username"]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"indexes": {
|
||||||
|
"idx_users_email": {
|
||||||
|
"name": "idx_users_email",
|
||||||
|
"table": "users",
|
||||||
|
"schema": "public",
|
||||||
|
"columns": ["email"],
|
||||||
|
"unique": false,
|
||||||
|
"type": "btree"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"relationships": {}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "posts",
|
||||||
|
"schema": "public",
|
||||||
|
"columns": {
|
||||||
|
"id": {
|
||||||
|
"name": "id",
|
||||||
|
"type": "bigint",
|
||||||
|
"not_null": true,
|
||||||
|
"is_primary_key": true,
|
||||||
|
"sequence": 1
|
||||||
|
},
|
||||||
|
"user_id": {
|
||||||
|
"name": "user_id",
|
||||||
|
"type": "bigint",
|
||||||
|
"not_null": true,
|
||||||
|
"sequence": 2
|
||||||
|
},
|
||||||
|
"title": {
|
||||||
|
"name": "title",
|
||||||
|
"type": "varchar",
|
||||||
|
"length": 200,
|
||||||
|
"not_null": true,
|
||||||
|
"sequence": 3
|
||||||
|
},
|
||||||
|
"content": {
|
||||||
|
"name": "content",
|
||||||
|
"type": "text",
|
||||||
|
"not_null": false,
|
||||||
|
"sequence": 4
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"constraints": {
|
||||||
|
"fk_posts_user_id": {
|
||||||
|
"name": "fk_posts_user_id",
|
||||||
|
"type": "FOREIGN KEY",
|
||||||
|
"table": "posts",
|
||||||
|
"schema": "public",
|
||||||
|
"columns": ["user_id"],
|
||||||
|
"referenced_table": "users",
|
||||||
|
"referenced_schema": "public",
|
||||||
|
"referenced_columns": ["id"],
|
||||||
|
"on_delete": "CASCADE",
|
||||||
|
"on_update": "NO ACTION"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"indexes": {
|
||||||
|
"idx_posts_user_id": {
|
||||||
|
"name": "idx_posts_user_id",
|
||||||
|
"columns": ["user_id"],
|
||||||
|
"unique": false,
|
||||||
|
"type": "btree"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"views": [],
|
||||||
|
"sequences": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Schema Structure
|
||||||
|
|
||||||
|
The JSON format includes the following levels; a minimal construction sketch follows these lists.
|
||||||
|
|
||||||
|
### Database Level
|
||||||
|
- `name` - Database name
|
||||||
|
- `description` - Database description
|
||||||
|
- `database_type` - Database system type
|
||||||
|
- `database_version` - Version information
|
||||||
|
- `source_format` - Original source format
|
||||||
|
- `schemas` - Array of schema objects
|
||||||
|
|
||||||
|
### Schema Level
|
||||||
|
- `name` - Schema name
|
||||||
|
- `description` - Schema description
|
||||||
|
- `tables` - Array of table objects
|
||||||
|
- `views` - Array of view objects
|
||||||
|
- `sequences` - Array of sequence objects
|
||||||
|
|
||||||
|
### Table Level
|
||||||
|
- `name` - Table name
|
||||||
|
- `schema` - Schema name
|
||||||
|
- `description` - Table description
|
||||||
|
- `columns` - Map of column objects
|
||||||
|
- `constraints` - Map of constraint objects
|
||||||
|
- `indexes` - Map of index objects
|
||||||
|
- `relationships` - Map of relationship objects
|
||||||
|
|
||||||
|
### Column Level
|
||||||
|
- `name` - Column name
|
||||||
|
- `type` - Data type
|
||||||
|
- `length` - Type length
|
||||||
|
- `precision`, `scale` - Numeric precision
|
||||||
|
- `not_null` - NOT NULL flag
|
||||||
|
- `is_primary_key` - Primary key flag
|
||||||
|
- `auto_increment` - Auto-increment flag
|
||||||
|
- `default` - Default value
|
||||||
|
- `sequence` - Column order
|
||||||
|
|
||||||
|
### Constraint Level
|
||||||
|
- `name` - Constraint name
|
||||||
|
- `type` - Constraint type (PRIMARY KEY, FOREIGN KEY, UNIQUE, CHECK)
|
||||||
|
- `columns` - Constrained columns
|
||||||
|
- `referenced_table`, `referenced_schema` - FK references
|
||||||
|
- `referenced_columns` - Referenced columns
|
||||||
|
- `on_delete`, `on_update` - FK actions
|
||||||
|
|
||||||
|
### Index Level
|
||||||
|
- `name` - Index name
|
||||||
|
- `columns` - Indexed columns
|
||||||
|
- `unique` - Unique flag
|
||||||
|
- `type` - Index type
|
||||||
|
|
||||||
|
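To see how these levels map onto the Go model, here is a minimal sketch that builds a one-table database with the constructors used elsewhere in this repository and writes it with the JSON writer. Field names such as `NotNull` and `IsPrimaryKey` correspond to the `not_null` and `is_primary_key` keys described above.

```go
package main

import (
	"git.warky.dev/wdevs/relspecgo/pkg/models"
	"git.warky.dev/wdevs/relspecgo/pkg/writers"
	"git.warky.dev/wdevs/relspecgo/pkg/writers/json"
)

func main() {
	// Database -> Schema -> Table -> Column mirrors the JSON nesting above.
	db := models.InitDatabase("myapp")
	schema := models.InitSchema("public")

	table := models.InitTable("users", "public")
	idCol := models.InitColumn("id", "users", "public")
	idCol.Type = "bigint"
	idCol.NotNull = true      // serialized as "not_null": true
	idCol.IsPrimaryKey = true // serialized as "is_primary_key": true
	table.Columns["id"] = idCol

	schema.Tables = []*models.Table{table}
	db.Schemas = []*models.Schema{schema}

	// Writes the pretty-printed JSON document to schema.json.
	writer := json.NewWriter(&writers.WriterOptions{OutputPath: "schema.json"})
	if err := writer.WriteDatabase(db); err != nil {
		panic(err)
	}
}
```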
## Use Cases
|
||||||
|
|
||||||
|
- **Version Control** - Track schema changes in git
|
||||||
|
- **Documentation** - Human-readable schema documentation
|
||||||
|
- **Interchange** - Standard format for tool integration
|
||||||
|
- **Backup** - Schema backup without database access
|
||||||
|
- **Testing** - Test data for schema validation
|
||||||
|
- **API** - Schema information for APIs
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- Output is pretty-printed with 2-space indentation
|
||||||
|
- Preserves all schema metadata
|
||||||
|
- Can be round-tripped (read and write) without loss
|
||||||
|
- Database-agnostic format
|
||||||
|
- Ideal for automation and tooling
|
||||||
195
pkg/writers/pgsql/README.md
Normal file
@@ -0,0 +1,195 @@
|
|||||||
|
# PostgreSQL Writer
|
||||||
|
|
||||||
|
Generates PostgreSQL DDL (Data Definition Language) SQL scripts from database schema information.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The PostgreSQL Writer converts RelSpec's internal database model representation into PostgreSQL-compatible SQL DDL scripts, including CREATE TABLE statements, constraints, indexes, views, and sequences.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Generates complete PostgreSQL DDL
|
||||||
|
- Creates schemas, tables, columns
|
||||||
|
- Defines constraints (PK, FK, unique, check)
|
||||||
|
- Creates indexes
|
||||||
|
- Generates views and sequences
|
||||||
|
- Supports migration scripts
|
||||||
|
- Includes audit triggers (optional)
|
||||||
|
- Handles PostgreSQL-specific data types
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers/pgsql"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
options := &writers.WriterOptions{
|
||||||
|
OutputPath: "schema.sql",
|
||||||
|
}
|
||||||
|
|
||||||
|
writer := pgsql.NewWriter(options)
|
||||||
|
err := writer.WriteDatabase(db)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Examples
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate PostgreSQL DDL from JSON schema
|
||||||
|
relspec --input json \
|
||||||
|
--in-file schema.json \
|
||||||
|
--output pgsql \
|
||||||
|
--out-file schema.sql
|
||||||
|
|
||||||
|
# Convert GORM models to PostgreSQL DDL
|
||||||
|
relspec --input gorm \
|
||||||
|
--in-file models.go \
|
||||||
|
--output pgsql \
|
||||||
|
--out-file create_tables.sql
|
||||||
|
|
||||||
|
# Export live database schema to SQL
|
||||||
|
relspec --input pgsql \
|
||||||
|
--conn "postgres://localhost/source_db" \
|
||||||
|
--output pgsql \
|
||||||
|
--out-file backup_schema.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
## Generated SQL Example
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Schema: public
|
||||||
|
|
||||||
|
CREATE SCHEMA IF NOT EXISTS public;
|
||||||
|
|
||||||
|
-- Table: public.users
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS public.users (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
username VARCHAR(50) NOT NULL,
|
||||||
|
email VARCHAR(100) NOT NULL,
|
||||||
|
bio TEXT,
|
||||||
|
created_at TIMESTAMP NOT NULL DEFAULT NOW()
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Constraints for public.users
|
||||||
|
|
||||||
|
ALTER TABLE public.users
|
||||||
|
ADD CONSTRAINT uq_users_username UNIQUE (username);
|
||||||
|
|
||||||
|
-- Indexes for public.users
|
||||||
|
|
||||||
|
CREATE INDEX idx_users_email ON public.users (email);
|
||||||
|
|
||||||
|
-- Table: public.posts
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS public.posts (
|
||||||
|
id BIGSERIAL PRIMARY KEY,
|
||||||
|
user_id BIGINT NOT NULL,
|
||||||
|
title VARCHAR(200) NOT NULL,
|
||||||
|
content TEXT,
|
||||||
|
created_at TIMESTAMP DEFAULT NOW()
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Foreign Keys for public.posts
|
||||||
|
|
||||||
|
ALTER TABLE public.posts
|
||||||
|
ADD CONSTRAINT fk_posts_user_id
|
||||||
|
FOREIGN KEY (user_id)
|
||||||
|
REFERENCES public.users (id)
|
||||||
|
ON DELETE CASCADE
|
||||||
|
ON UPDATE NO ACTION;
|
||||||
|
|
||||||
|
-- Indexes for public.posts
|
||||||
|
|
||||||
|
CREATE INDEX idx_posts_user_id ON public.posts (user_id);
|
||||||
|
```
|
||||||
|
|
||||||
|
## Writer Options
|
||||||
|
|
||||||
|
### Metadata Options
|
||||||
|
|
||||||
|
```go
|
||||||
|
options := &writers.WriterOptions{
|
||||||
|
OutputPath: "schema.sql",
|
||||||
|
Metadata: map[string]interface{}{
|
||||||
|
"include_drop": true, // Include DROP statements
|
||||||
|
"include_audit": true, // Include audit triggers
|
||||||
|
"if_not_exists": true, // Use IF NOT EXISTS
|
||||||
|
"migration_mode": false, // Generate migration script
|
||||||
|
},
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Feature Details
|
||||||
|
|
||||||
|
### Full DDL Generation
|
||||||
|
|
||||||
|
Generates complete database structure:
|
||||||
|
- CREATE SCHEMA statements
|
||||||
|
- CREATE TABLE with all columns and types
|
||||||
|
- PRIMARY KEY constraints
|
||||||
|
- FOREIGN KEY constraints with actions
|
||||||
|
- UNIQUE constraints
|
||||||
|
- CHECK constraints
|
||||||
|
- CREATE INDEX statements
|
||||||
|
- CREATE VIEW statements
|
||||||
|
- CREATE SEQUENCE statements
|
||||||
|
|
||||||
|
### Migration Mode
|
||||||
|
|
||||||
|
When `migration_mode` is enabled, the writer generates migration scripts with:
|
||||||
|
- Version tracking
|
||||||
|
- Up/down migrations
|
||||||
|
- Transactional DDL
|
||||||
|
- Rollback support
|
||||||
|
|
||||||
|
### Audit Triggers
|
||||||
|
|
||||||
|
When `include_audit` is enabled, the writer also adds:
|
||||||
|
- Created/updated timestamp triggers
|
||||||
|
- Audit logging functionality
|
||||||
|
- Change tracking
|
||||||
|
|
||||||
|
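A rough configuration sketch for the two sections above, reusing the metadata keys listed under Writer Options (how far each key is honored depends on the writer implementation), with `db` built as in the Basic Example:

```go
// Hedged sketch: the keys are the documented Writer Options metadata keys.
options := &writers.WriterOptions{
	OutputPath: "migrations/0001_init.sql",
	Metadata: map[string]interface{}{
		"migration_mode": true, // emit a migration-style script
		"include_audit":  true, // add created/updated audit triggers
		"if_not_exists":  true, // guard CREATE statements with IF NOT EXISTS
	},
}

writer := pgsql.NewWriter(options)
if err := writer.WriteDatabase(db); err != nil {
	panic(err)
}
```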
## PostgreSQL-Specific Features
|
||||||
|
|
||||||
|
- Serial types (SERIAL, BIGSERIAL)
|
||||||
|
- Advanced types (UUID, JSONB, ARRAY)
|
||||||
|
- Schema-qualified names
|
||||||
|
- Constraint actions (CASCADE, RESTRICT, SET NULL)
|
||||||
|
- Partial indexes
|
||||||
|
- Function-based indexes
|
||||||
|
- Check constraints with expressions
|
||||||
|
|
||||||
|
## Data Types
|
||||||
|
|
||||||
|
Supports all PostgreSQL data types:
|
||||||
|
- Integer types: SMALLINT, INTEGER, BIGINT, SERIAL, BIGSERIAL
|
||||||
|
- Numeric types: NUMERIC, DECIMAL, REAL, DOUBLE PRECISION
|
||||||
|
- String types: VARCHAR, CHAR, TEXT
|
||||||
|
- Date/Time: DATE, TIME, TIMESTAMP, TIMESTAMPTZ, INTERVAL
|
||||||
|
- Boolean: BOOLEAN
|
||||||
|
- Binary: BYTEA
|
||||||
|
- JSON: JSON, JSONB
|
||||||
|
- UUID: UUID
|
||||||
|
- Network: INET, CIDR, MACADDR
|
||||||
|
- Special: ARRAY, HSTORE
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- Generated SQL is formatted and readable
|
||||||
|
- Comments are preserved from source schema
|
||||||
|
- Schema names are fully qualified
|
||||||
|
- Default values are properly quoted
|
||||||
|
- Constraint names follow PostgreSQL conventions
|
||||||
|
- Compatible with PostgreSQL 12+
|
||||||
135
pkg/writers/prisma/README.md
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
# Prisma Writer
|
||||||
|
|
||||||
|
Generates Prisma schema files from database schema information.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The Prisma Writer converts RelSpec's internal database model representation into Prisma schema language (`.prisma` files), complete with models, fields, relationships, and attributes.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Generates Prisma schema syntax
|
||||||
|
- Creates model definitions with proper field types
|
||||||
|
- Adds Prisma attributes (@id, @unique, @default, etc.)
|
||||||
|
- Generates relationship fields
|
||||||
|
- Includes datasource and generator configurations
|
||||||
|
- Maps table/column names with @map and @@map
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers/prisma"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
options := &writers.WriterOptions{
|
||||||
|
OutputPath: "schema.prisma",
|
||||||
|
Metadata: map[string]interface{}{
|
||||||
|
"datasource_provider": "postgresql",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
writer := prisma.NewWriter(options)
|
||||||
|
err := writer.WriteDatabase(db)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Examples
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate Prisma schema from PostgreSQL database
|
||||||
|
relspec --input pgsql \
|
||||||
|
--conn "postgres://localhost/mydb" \
|
||||||
|
--output prisma \
|
||||||
|
--out-file schema.prisma
|
||||||
|
|
||||||
|
# Convert GORM models to Prisma
|
||||||
|
relspec --input gorm --in-file models.go --output prisma --out-file schema.prisma
|
||||||
|
|
||||||
|
# Convert JSON to Prisma schema
|
||||||
|
relspec --input json --in-file database.json --output prisma --out-file prisma/schema.prisma
|
||||||
|
```
|
||||||
|
|
||||||
|
## Generated Code Example
|
||||||
|
|
||||||
|
```prisma
|
||||||
|
datasource db {
|
||||||
|
provider = "postgresql"
|
||||||
|
url = env("DATABASE_URL")
|
||||||
|
}
|
||||||
|
|
||||||
|
generator client {
|
||||||
|
provider = "prisma-client-js"
|
||||||
|
}
|
||||||
|
|
||||||
|
model User {
|
||||||
|
id Int @id @default(autoincrement())
|
||||||
|
username String @unique @db.VarChar(50)
|
||||||
|
email String @db.VarChar(100)
|
||||||
|
bio String? @db.Text
|
||||||
|
createdAt DateTime @default(now()) @map("created_at")
|
||||||
|
|
||||||
|
posts Post[]
|
||||||
|
|
||||||
|
@@map("users")
|
||||||
|
}
|
||||||
|
|
||||||
|
model Post {
|
||||||
|
id Int @id @default(autoincrement())
|
||||||
|
userId Int @map("user_id")
|
||||||
|
title String @db.VarChar(200)
|
||||||
|
content String? @db.Text
|
||||||
|
|
||||||
|
user User @relation(fields: [userId], references: [id], onDelete: Cascade)
|
||||||
|
|
||||||
|
@@map("posts")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Supported Prisma Attributes
|
||||||
|
|
||||||
|
### Field Attributes
|
||||||
|
- `@id` - Primary key
|
||||||
|
- `@unique` - Unique constraint
|
||||||
|
- `@default()` - Default value
|
||||||
|
- `@map()` - Column name mapping
|
||||||
|
- `@db.*` - Database-specific types
|
||||||
|
- `@relation()` - Relationship definition
|
||||||
|
|
||||||
|
### Model Attributes
|
||||||
|
- `@@map()` - Table name mapping
|
||||||
|
- `@@unique()` - Composite unique constraints
|
||||||
|
- `@@index()` - Index definitions
|
||||||
|
- `@@id()` - Composite primary keys
|
||||||
|
|
||||||
|
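As a hedged illustration of when the block attributes appear, the sketch below (building on the imports from the Basic Example) constructs a table with a composite primary key and a two-column unique constraint; for such input the writer is expected to emit `@@id([...])` and `@@unique([...])` lines in the resulting model.

```go
// Sketch: a two-column table that should surface @@id and @@unique.
table := models.InitTable("PostTag", "public")

postID := models.InitColumn("postId", "PostTag", "public")
postID.Type = "bigint"
postID.NotNull = true
postID.IsPrimaryKey = true
table.Columns["postId"] = postID

tagID := models.InitColumn("tagId", "PostTag", "public")
tagID.Type = "bigint"
tagID.NotNull = true
tagID.IsPrimaryKey = true
table.Columns["tagId"] = tagID

// Composite primary key -> @@id([postId, tagId])
pk := models.InitConstraint("pk_posttag", models.PrimaryKeyConstraint)
pk.Columns = []string{"postId", "tagId"}
table.Constraints["pk_posttag"] = pk

// Multi-column unique constraint -> @@unique([postId, tagId])
uq := models.InitConstraint("uq_posttag", models.UniqueConstraint)
uq.Columns = []string{"postId", "tagId"}
table.Constraints["uq_posttag"] = uq

// An empty OutputPath prints the generated schema to stdout.
if err := prisma.NewWriter(&writers.WriterOptions{}).WriteTable(table); err != nil {
	panic(err)
}
```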
## Type Mapping
|
||||||
|
|
||||||
|
| SQL Type | Prisma Type | Database Type |
|
||||||
|
|----------|-------------|---------------|
|
||||||
|
| bigint | BigInt | - |
|
||||||
|
| integer | Int | - |
|
||||||
|
| varchar(n) | String | @db.VarChar(n) |
|
||||||
|
| text | String | @db.Text |
|
||||||
|
| boolean | Boolean | - |
|
||||||
|
| timestamp | DateTime | @db.Timestamp |
|
||||||
|
| uuid | String | @db.Uuid |
|
||||||
|
| json | Json | - |
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- Model names are PascalCase (e.g., `User`, `Post`)
|
||||||
|
- Field names are camelCase with `@map` for snake_case columns
|
||||||
|
- Table names use `@@map` when different from model name
|
||||||
|
- Nullable fields are marked with `?`
|
||||||
|
- Relationship fields are automatically generated
|
||||||
|
- Datasource provider defaults to `postgresql`
|
||||||
551
pkg/writers/prisma/writer.go
Normal file
@@ -0,0 +1,551 @@
|
|||||||
|
package prisma
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Writer implements the writers.Writer interface for Prisma schema format
|
||||||
|
type Writer struct {
|
||||||
|
options *writers.WriterOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWriter creates a new Prisma writer with the given options
|
||||||
|
func NewWriter(options *writers.WriterOptions) *Writer {
|
||||||
|
return &Writer{
|
||||||
|
options: options,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteDatabase writes a Database model to Prisma schema format
|
||||||
|
func (w *Writer) WriteDatabase(db *models.Database) error {
|
||||||
|
content := w.databaseToPrisma(db)
|
||||||
|
|
||||||
|
if w.options.OutputPath != "" {
|
||||||
|
return os.WriteFile(w.options.OutputPath, []byte(content), 0644)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Print(content)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteSchema writes a Schema model to Prisma schema format
|
||||||
|
func (w *Writer) WriteSchema(schema *models.Schema) error {
|
||||||
|
// Create temporary database for schema
|
||||||
|
db := models.InitDatabase("database")
|
||||||
|
db.Schemas = []*models.Schema{schema}
|
||||||
|
|
||||||
|
return w.WriteDatabase(db)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteTable writes a Table model to Prisma schema format
|
||||||
|
func (w *Writer) WriteTable(table *models.Table) error {
|
||||||
|
// Create temporary schema and database for table
|
||||||
|
schema := models.InitSchema(table.Schema)
|
||||||
|
schema.Tables = []*models.Table{table}
|
||||||
|
|
||||||
|
return w.WriteSchema(schema)
|
||||||
|
}
|
||||||
|
|
||||||
|
// databaseToPrisma converts a Database to Prisma schema format string
|
||||||
|
func (w *Writer) databaseToPrisma(db *models.Database) string {
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
|
// Write datasource block
|
||||||
|
sb.WriteString(w.generateDatasource(db))
|
||||||
|
sb.WriteString("\n")
|
||||||
|
|
||||||
|
// Write generator block
|
||||||
|
sb.WriteString(w.generateGenerator())
|
||||||
|
sb.WriteString("\n")
|
||||||
|
|
||||||
|
// Process all schemas (typically just one in Prisma)
|
||||||
|
for _, schema := range db.Schemas {
|
||||||
|
// Write enums
|
||||||
|
if len(schema.Enums) > 0 {
|
||||||
|
for _, enum := range schema.Enums {
|
||||||
|
sb.WriteString(w.enumToPrisma(enum))
|
||||||
|
sb.WriteString("\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Identify join tables for implicit M2M
|
||||||
|
joinTables := w.identifyJoinTables(schema)
|
||||||
|
|
||||||
|
// Write models (excluding join tables)
|
||||||
|
for _, table := range schema.Tables {
|
||||||
|
if joinTables[table.Name] {
|
||||||
|
continue // Skip join tables
|
||||||
|
}
|
||||||
|
sb.WriteString(w.tableToPrisma(table, schema, joinTables))
|
||||||
|
sb.WriteString("\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateDatasource generates the datasource block
|
||||||
|
func (w *Writer) generateDatasource(db *models.Database) string {
|
||||||
|
provider := "postgresql"
|
||||||
|
|
||||||
|
// Map database type to Prisma provider
|
||||||
|
switch db.DatabaseType {
|
||||||
|
case models.PostgresqlDatabaseType:
|
||||||
|
provider = "postgresql"
|
||||||
|
case models.MSSQLDatabaseType:
|
||||||
|
provider = "sqlserver"
|
||||||
|
case models.SqlLiteDatabaseType:
|
||||||
|
provider = "sqlite"
|
||||||
|
case "mysql":
|
||||||
|
provider = "mysql"
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf(`datasource db {
|
||||||
|
provider = "%s"
|
||||||
|
url = env("DATABASE_URL")
|
||||||
|
}
|
||||||
|
`, provider)
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateGenerator generates the generator block
|
||||||
|
func (w *Writer) generateGenerator() string {
|
||||||
|
return `generator client {
|
||||||
|
provider = "prisma-client-js"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
}
|
||||||
|
|
||||||
|
// enumToPrisma converts an Enum to Prisma enum block
|
||||||
|
func (w *Writer) enumToPrisma(enum *models.Enum) string {
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
|
sb.WriteString(fmt.Sprintf("enum %s {\n", enum.Name))
|
||||||
|
for _, value := range enum.Values {
|
||||||
|
sb.WriteString(fmt.Sprintf(" %s\n", value))
|
||||||
|
}
|
||||||
|
sb.WriteString("}\n")
|
||||||
|
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// identifyJoinTables identifies tables that are join tables for M2M relations
|
||||||
|
func (w *Writer) identifyJoinTables(schema *models.Schema) map[string]bool {
|
||||||
|
joinTables := make(map[string]bool)
|
||||||
|
|
||||||
|
for _, table := range schema.Tables {
|
||||||
|
// Check if this is a join table:
|
||||||
|
// 1. Starts with _ (Prisma convention)
|
||||||
|
// 2. Has exactly 2 FK constraints
|
||||||
|
// 3. Has composite PK with those 2 columns
|
||||||
|
// 4. Has no other columns except the FK columns
|
||||||
|
|
||||||
|
if !strings.HasPrefix(table.Name, "_") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
fks := table.GetForeignKeys()
|
||||||
|
if len(fks) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if columns are only the FK columns
|
||||||
|
if len(table.Columns) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if both FK columns are part of PK
|
||||||
|
pkCols := 0
|
||||||
|
for _, col := range table.Columns {
|
||||||
|
if col.IsPrimaryKey {
|
||||||
|
pkCols++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if pkCols == 2 {
|
||||||
|
joinTables[table.Name] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return joinTables
|
||||||
|
}
|
||||||
|
|
||||||
|
// tableToPrisma converts a Table to Prisma model block
|
||||||
|
func (w *Writer) tableToPrisma(table *models.Table, schema *models.Schema, joinTables map[string]bool) string {
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
|
sb.WriteString(fmt.Sprintf("model %s {\n", table.Name))
|
||||||
|
|
||||||
|
// Collect columns to write
|
||||||
|
columns := make([]*models.Column, 0, len(table.Columns))
|
||||||
|
for _, col := range table.Columns {
|
||||||
|
columns = append(columns, col)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort columns for consistent output
|
||||||
|
sort.Slice(columns, func(i, j int) bool {
|
||||||
|
return columns[i].Name < columns[j].Name
|
||||||
|
})
|
||||||
|
|
||||||
|
// Write scalar fields
|
||||||
|
for _, col := range columns {
|
||||||
|
// Skip if this column is part of a relation that will be output as array field
|
||||||
|
if w.isRelationColumn(col, table) {
|
||||||
|
// We'll output this with the relation field
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.WriteString(w.columnToField(col, table, schema))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write relation fields
|
||||||
|
sb.WriteString(w.generateRelationFields(table, schema, joinTables))
|
||||||
|
|
||||||
|
// Write block attributes (@@id, @@unique, @@index)
|
||||||
|
sb.WriteString(w.generateBlockAttributes(table))
|
||||||
|
|
||||||
|
sb.WriteString("}\n")
|
||||||
|
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// columnToField converts a Column to a Prisma field definition
|
||||||
|
func (w *Writer) columnToField(col *models.Column, table *models.Table, schema *models.Schema) string {
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
|
// Field name
|
||||||
|
sb.WriteString(fmt.Sprintf(" %s", col.Name))
|
||||||
|
|
||||||
|
// Field type
|
||||||
|
prismaType := w.sqlTypeToPrisma(col.Type, schema)
|
||||||
|
sb.WriteString(fmt.Sprintf(" %s", prismaType))
|
||||||
|
|
||||||
|
// Optional modifier
|
||||||
|
if !col.NotNull && !col.IsPrimaryKey {
|
||||||
|
sb.WriteString("?")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Field attributes
|
||||||
|
attributes := w.generateFieldAttributes(col, table)
|
||||||
|
if attributes != "" {
|
||||||
|
sb.WriteString(" ")
|
||||||
|
sb.WriteString(attributes)
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.WriteString("\n")
|
||||||
|
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// sqlTypeToPrisma converts SQL types to Prisma types
|
||||||
|
func (w *Writer) sqlTypeToPrisma(sqlType string, schema *models.Schema) string {
|
||||||
|
// Check if it's an enum
|
||||||
|
for _, enum := range schema.Enums {
|
||||||
|
if strings.EqualFold(sqlType, enum.Name) {
|
||||||
|
return enum.Name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Standard type mapping
|
||||||
|
typeMap := map[string]string{
|
||||||
|
"text": "String",
|
||||||
|
"varchar": "String",
|
||||||
|
"character varying": "String",
|
||||||
|
"char": "String",
|
||||||
|
"boolean": "Boolean",
|
||||||
|
"bool": "Boolean",
|
||||||
|
"integer": "Int",
|
||||||
|
"int": "Int",
|
||||||
|
"int4": "Int",
|
||||||
|
"bigint": "BigInt",
|
||||||
|
"int8": "BigInt",
|
||||||
|
"double precision": "Float",
|
||||||
|
"float": "Float",
|
||||||
|
"float8": "Float",
|
||||||
|
"decimal": "Decimal",
|
||||||
|
"numeric": "Decimal",
|
||||||
|
"timestamp": "DateTime",
|
||||||
|
"timestamptz": "DateTime",
|
||||||
|
"date": "DateTime",
|
||||||
|
"jsonb": "Json",
|
||||||
|
"json": "Json",
|
||||||
|
"bytea": "Bytes",
|
||||||
|
}
|
||||||
|
|
||||||
|
for sqlPattern, prismaType := range typeMap {
|
||||||
|
if strings.Contains(strings.ToLower(sqlType), sqlPattern) {
|
||||||
|
return prismaType
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default to String for unknown types
|
||||||
|
return "String"
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateFieldAttributes generates field attributes like @id, @unique, @default
|
||||||
|
func (w *Writer) generateFieldAttributes(col *models.Column, table *models.Table) string {
|
||||||
|
attrs := make([]string, 0)
|
||||||
|
|
||||||
|
// @id
|
||||||
|
if col.IsPrimaryKey {
|
||||||
|
// Check if this is part of a composite key
|
||||||
|
pkCount := 0
|
||||||
|
for _, c := range table.Columns {
|
||||||
|
if c.IsPrimaryKey {
|
||||||
|
pkCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if pkCount == 1 {
|
||||||
|
attrs = append(attrs, "@id")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// @unique
|
||||||
|
if w.hasUniqueConstraint(col.Name, table) {
|
||||||
|
attrs = append(attrs, "@unique")
|
||||||
|
}
|
||||||
|
|
||||||
|
// @default
|
||||||
|
if col.AutoIncrement {
|
||||||
|
attrs = append(attrs, "@default(autoincrement())")
|
||||||
|
} else if col.Default != nil {
|
||||||
|
defaultAttr := w.formatDefaultValue(col.Default)
|
||||||
|
if defaultAttr != "" {
|
||||||
|
attrs = append(attrs, fmt.Sprintf("@default(%s)", defaultAttr))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// @updatedAt (check comment)
|
||||||
|
if strings.Contains(col.Comment, "@updatedAt") {
|
||||||
|
attrs = append(attrs, "@updatedAt")
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(attrs, " ")
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatDefaultValue formats a default value for Prisma
|
||||||
|
func (w *Writer) formatDefaultValue(defaultValue any) string {
|
||||||
|
switch v := defaultValue.(type) {
|
||||||
|
case string:
|
||||||
|
if v == "now()" {
|
||||||
|
return "now()"
|
||||||
|
} else if v == "gen_random_uuid()" {
|
||||||
|
return "uuid()"
|
||||||
|
} else if strings.Contains(strings.ToLower(v), "uuid") {
|
||||||
|
return "uuid()"
|
||||||
|
} else {
|
||||||
|
// String literal
|
||||||
|
return fmt.Sprintf(`"%s"`, v)
|
||||||
|
}
|
||||||
|
case bool:
|
||||||
|
if v {
|
||||||
|
return "true"
|
||||||
|
}
|
||||||
|
return "false"
|
||||||
|
case int, int64, int32:
|
||||||
|
return fmt.Sprintf("%v", v)
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("%v", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasUniqueConstraint checks if a column has a unique constraint
|
||||||
|
func (w *Writer) hasUniqueConstraint(colName string, table *models.Table) bool {
|
||||||
|
for _, constraint := range table.Constraints {
|
||||||
|
if constraint.Type == models.UniqueConstraint &&
|
||||||
|
len(constraint.Columns) == 1 &&
|
||||||
|
constraint.Columns[0] == colName {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// isRelationColumn checks if a column is a FK column
|
||||||
|
func (w *Writer) isRelationColumn(col *models.Column, table *models.Table) bool {
|
||||||
|
for _, constraint := range table.Constraints {
|
||||||
|
if constraint.Type == models.ForeignKeyConstraint {
|
||||||
|
for _, fkCol := range constraint.Columns {
|
||||||
|
if fkCol == col.Name {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateRelationFields generates relation fields and their FK columns
|
||||||
|
func (w *Writer) generateRelationFields(table *models.Table, schema *models.Schema, joinTables map[string]bool) string {
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
|
// Get all FK constraints
|
||||||
|
fks := table.GetForeignKeys()
|
||||||
|
|
||||||
|
for _, fk := range fks {
|
||||||
|
// Generate the FK scalar field
|
||||||
|
for _, fkCol := range fk.Columns {
|
||||||
|
if col, exists := table.Columns[fkCol]; exists {
|
||||||
|
sb.WriteString(w.columnToField(col, table, schema))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate the relation field
|
||||||
|
relationType := fk.ReferencedTable
|
||||||
|
isOptional := false
|
||||||
|
|
||||||
|
// Check if FK column is nullable
|
||||||
|
for _, fkCol := range fk.Columns {
|
||||||
|
if col, exists := table.Columns[fkCol]; exists {
|
||||||
|
if !col.NotNull {
|
||||||
|
isOptional = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
relationName := relationType
|
||||||
|
if strings.HasSuffix(strings.ToLower(relationName), "s") {
|
||||||
|
relationName = relationName[:len(relationName)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.WriteString(fmt.Sprintf(" %s %s", strings.ToLower(relationName), relationType))
|
||||||
|
|
||||||
|
if isOptional {
|
||||||
|
sb.WriteString("?")
|
||||||
|
}
|
||||||
|
|
||||||
|
// @relation attribute
|
||||||
|
relationAttr := w.generateRelationAttribute(fk)
|
||||||
|
if relationAttr != "" {
|
||||||
|
sb.WriteString(" ")
|
||||||
|
sb.WriteString(relationAttr)
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.WriteString("\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate inverse relations (arrays) for tables that reference this one
|
||||||
|
sb.WriteString(w.generateInverseRelations(table, schema, joinTables))
|
||||||
|
|
||||||
|
return sb.String()
|
}

// generateRelationAttribute generates the @relation(...) attribute
func (w *Writer) generateRelationAttribute(fk *models.Constraint) string {
	parts := make([]string, 0)

	// fields
	fieldsStr := strings.Join(fk.Columns, ", ")
	parts = append(parts, fmt.Sprintf("fields: [%s]", fieldsStr))

	// references
	referencesStr := strings.Join(fk.ReferencedColumns, ", ")
	parts = append(parts, fmt.Sprintf("references: [%s]", referencesStr))

	// onDelete
	if fk.OnDelete != "" {
		parts = append(parts, fmt.Sprintf("onDelete: %s", fk.OnDelete))
	}

	// onUpdate
	if fk.OnUpdate != "" {
		parts = append(parts, fmt.Sprintf("onUpdate: %s", fk.OnUpdate))
	}

	return fmt.Sprintf("@relation(%s)", strings.Join(parts, ", "))
}

// generateInverseRelations generates array fields for reverse relationships
func (w *Writer) generateInverseRelations(table *models.Table, schema *models.Schema, joinTables map[string]bool) string {
	var sb strings.Builder

	// Find all tables that have FKs pointing to this table
	for _, otherTable := range schema.Tables {
		if otherTable.Name == table.Name {
			continue
		}

		// Check if this is a join table
		if joinTables[otherTable.Name] {
			// Handle implicit M2M
			if w.isJoinTableFor(otherTable, table.Name) {
				// Find the other side of the M2M
				for _, fk := range otherTable.GetForeignKeys() {
					if fk.ReferencedTable != table.Name {
						// This is the other side
						otherSide := fk.ReferencedTable
						sb.WriteString(fmt.Sprintf(" %ss %s[]\n",
							strings.ToLower(otherSide), otherSide))
						break
					}
				}
			}
			continue
		}

		// Regular one-to-many inverse relation
		for _, fk := range otherTable.GetForeignKeys() {
			if fk.ReferencedTable == table.Name {
				// This table is referenced by otherTable
				pluralName := otherTable.Name
				if !strings.HasSuffix(pluralName, "s") {
					pluralName += "s"
				}

				sb.WriteString(fmt.Sprintf(" %s %s[]\n",
					strings.ToLower(pluralName), otherTable.Name))
			}
		}
	}

	return sb.String()
}

// isJoinTableFor checks if a table is a join table involving the specified model
func (w *Writer) isJoinTableFor(joinTable *models.Table, modelName string) bool {
	for _, fk := range joinTable.GetForeignKeys() {
		if fk.ReferencedTable == modelName {
			return true
		}
	}
	return false
}

// generateBlockAttributes generates block-level attributes like @@id, @@unique, @@index
func (w *Writer) generateBlockAttributes(table *models.Table) string {
	var sb strings.Builder

	// @@id for composite primary key
	pkCols := make([]string, 0)
	for _, col := range table.Columns {
		if col.IsPrimaryKey {
			pkCols = append(pkCols, col.Name)
		}
	}

	if len(pkCols) > 1 {
		sort.Strings(pkCols)
		sb.WriteString(fmt.Sprintf(" @@id([%s])\n", strings.Join(pkCols, ", ")))
	}

	// @@unique for multi-column unique constraints
	for _, constraint := range table.Constraints {
		if constraint.Type == models.UniqueConstraint && len(constraint.Columns) > 1 {
			sb.WriteString(fmt.Sprintf(" @@unique([%s])\n", strings.Join(constraint.Columns, ", ")))
		}
	}

	// @@index for indexes
	for _, index := range table.Indexes {
		if !index.Unique { // Unique indexes are handled by @@unique
			sb.WriteString(fmt.Sprintf(" @@index([%s])\n", strings.Join(index.Columns, ", ")))
		}
	}

	return sb.String()
}
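For context, a rough sketch of what `generateRelationAttribute` emits for a typical foreign key (a hypothetical in-package example, not part of the diff; the constraint values are made up, and the exact output depends on the action strings stored on the constraint):

```go
// Hypothetical constraint: posts.user_id references users.id with ON DELETE Cascade.
fk := &models.Constraint{
	Columns:           []string{"user_id"},
	ReferencedColumns: []string{"id"},
	OnDelete:          "Cascade",
}

w := &Writer{}
attr := w.generateRelationAttribute(fk)
// attr == "@relation(fields: [user_id], references: [id], onDelete: Cascade)"
```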
169 pkg/writers/typeorm/README.md Normal file
@@ -0,0 +1,169 @@
# TypeORM Writer

Generates TypeScript files with TypeORM entity definitions from database schema information.

## Overview

The TypeORM Writer converts RelSpec's internal database model representation into TypeScript source code with TypeORM entity classes, including proper decorators, relationships, and column configurations.

## Features

- Generates TypeORM-compatible TypeScript entities
- Creates proper decorator usage (@Entity, @Column, etc.)
- Adds relationship decorators (@OneToMany, @ManyToOne, @JoinColumn)
- Handles column types and options
- Supports constraints and indexes
- Outputs formatted TypeScript code

## Usage

### Basic Example

```go
package main

import (
	"git.warky.dev/wdevs/relspecgo/pkg/models"
	"git.warky.dev/wdevs/relspecgo/pkg/writers"
	"git.warky.dev/wdevs/relspecgo/pkg/writers/typeorm"
)

func main() {
	options := &writers.WriterOptions{
		OutputPath: "entities/",
	}

	// db is a *models.Database, typically produced by one of the readers.
	var db *models.Database

	writer := typeorm.NewWriter(options)
	err := writer.WriteDatabase(db)
	if err != nil {
		panic(err)
	}
}
```
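The basic example writes the whole database; for a single table, the writer also exposes `WriteTable` (defined in `pkg/writers/typeorm/writer.go` further down in this diff). A minimal sketch, assuming `table` is a `*models.Table` you already hold:

```go
// Write one table's entity using the same writer options.
writer := typeorm.NewWriter(&writers.WriterOptions{OutputPath: "entities/"})
if err := writer.WriteTable(table); err != nil {
	panic(err)
}
```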
### CLI Examples

```bash
# Generate TypeORM entities from PostgreSQL database
relspec --input pgsql \
  --conn "postgres://localhost/mydb" \
  --output typeorm \
  --out-file entities/

# Convert GORM models to TypeORM
relspec --input gorm --in-file models.go --output typeorm --out-file src/entities/

# Convert JSON to TypeORM entities
relspec --input json --in-file schema.json --output typeorm --out-file entities/
```

## Generated Code Example

```typescript
import {
  Entity,
  PrimaryGeneratedColumn,
  Column,
  CreateDateColumn,
  OneToMany,
  ManyToOne,
  JoinColumn,
  Index,
} from 'typeorm';
import { Post } from './Post';

@Entity('users')
export class User {
  @PrimaryGeneratedColumn('increment')
  id: number;

  @Column({ type: 'varchar', length: 50, unique: true })
  @Index()
  username: string;

  @Column({ type: 'varchar', length: 100 })
  email: string;

  @Column({ type: 'text', nullable: true })
  bio: string | null;

  @CreateDateColumn({ name: 'created_at' })
  createdAt: Date;

  @OneToMany(() => Post, (post) => post.user)
  posts: Post[];
}

@Entity('posts')
export class Post {
  @PrimaryGeneratedColumn('increment')
  id: number;

  @Column({ name: 'user_id' })
  userId: number;

  @Column({ type: 'varchar', length: 200 })
  title: string;

  @Column({ type: 'text', nullable: true })
  content: string | null;

  @ManyToOne(() => User, (user) => user.posts, { onDelete: 'CASCADE' })
  @JoinColumn({ name: 'user_id' })
  user: User;
}
```

## Supported TypeORM Decorators

### Entity Decorators
- `@Entity()` - Define entity/table
- `@PrimaryGeneratedColumn()` - Auto-increment primary key
- `@PrimaryColumn()` - Primary key
- `@Column()` - Column definition
- `@CreateDateColumn()` - Auto-set creation timestamp
- `@UpdateDateColumn()` - Auto-update timestamp

### Relationship Decorators
- `@OneToMany()` - One-to-many relationship
- `@ManyToOne()` - Many-to-one relationship
- `@JoinColumn()` - Foreign key column specification

### Constraint Decorators
- `@Index()` - Create index
- `@Unique()` - Unique constraint

## Column Options

```typescript
@Column({
  type: 'varchar',      // Column type
  length: 255,          // Length for varchar/char
  nullable: true,       // Allow NULL
  unique: true,         // Unique constraint
  default: 'value',     // Default value
  name: 'column_name',  // Database column name
})
```

## Type Mapping

| SQL Type  | TypeScript Type | TypeORM Type |
|-----------|-----------------|--------------|
| bigint    | number          | 'bigint'     |
| integer   | number          | 'int'        |
| varchar   | string          | 'varchar'    |
| text      | string          | 'text'       |
| boolean   | boolean         | 'boolean'    |
| timestamp | Date            | 'timestamp'  |
| json      | object          | 'json'       |
| uuid      | string          | 'uuid'       |

## Notes

- Entity class names are PascalCase
- One file per entity (named after the entity)
- Relationship imports are auto-generated
- Nullable columns use TypeScript union with `null`
- Foreign key actions (CASCADE, etc.) are included
- Schema names can be specified in `@Entity()` decorator
631 pkg/writers/typeorm/writer.go Normal file
@@ -0,0 +1,631 @@
|
|||||||
|
package typeorm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Writer implements the writers.Writer interface for TypeORM entity format
|
||||||
|
type Writer struct {
|
||||||
|
options *writers.WriterOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWriter creates a new TypeORM writer with the given options
|
||||||
|
func NewWriter(options *writers.WriterOptions) *Writer {
|
||||||
|
return &Writer{
|
||||||
|
options: options,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteDatabase writes a Database model to TypeORM entity format
|
||||||
|
func (w *Writer) WriteDatabase(db *models.Database) error {
|
||||||
|
content := w.databaseToTypeORM(db)
|
||||||
|
|
||||||
|
if w.options.OutputPath != "" {
|
||||||
|
return os.WriteFile(w.options.OutputPath, []byte(content), 0644)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Print(content)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteSchema writes a Schema model to TypeORM entity format
|
||||||
|
func (w *Writer) WriteSchema(schema *models.Schema) error {
|
||||||
|
db := models.InitDatabase("database")
|
||||||
|
db.Schemas = []*models.Schema{schema}
|
||||||
|
|
||||||
|
return w.WriteDatabase(db)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteTable writes a Table model to TypeORM entity format
|
||||||
|
func (w *Writer) WriteTable(table *models.Table) error {
|
||||||
|
schema := models.InitSchema(table.Schema)
|
||||||
|
schema.Tables = []*models.Table{table}
|
||||||
|
|
||||||
|
return w.WriteSchema(schema)
|
||||||
|
}
|
||||||
|
|
||||||
|
// databaseToTypeORM converts a Database to TypeORM entity format string
|
||||||
|
func (w *Writer) databaseToTypeORM(db *models.Database) string {
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
|
// Generate imports
|
||||||
|
sb.WriteString(w.generateImports(db))
|
||||||
|
sb.WriteString("\n")
|
||||||
|
|
||||||
|
// Process all schemas
|
||||||
|
for _, schema := range db.Schemas {
|
||||||
|
// Identify join tables
|
||||||
|
joinTables := w.identifyJoinTables(schema)
|
||||||
|
|
||||||
|
// Write entities (excluding join tables)
|
||||||
|
for _, table := range schema.Tables {
|
||||||
|
if joinTables[table.Name] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
sb.WriteString(w.tableToEntity(table, schema, joinTables))
|
||||||
|
sb.WriteString("\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write view entities
|
||||||
|
for _, view := range schema.Views {
|
||||||
|
sb.WriteString(w.viewToEntity(view))
|
||||||
|
sb.WriteString("\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateImports generates the TypeORM import statement
|
||||||
|
func (w *Writer) generateImports(db *models.Database) string {
|
||||||
|
imports := make([]string, 0)
|
||||||
|
|
||||||
|
// Always include basic decorators
|
||||||
|
imports = append(imports, "Entity", "PrimaryGeneratedColumn", "Column")
|
||||||
|
|
||||||
|
// Check if we need relation decorators
|
||||||
|
needsManyToOne := false
|
||||||
|
needsOneToMany := false
|
||||||
|
needsManyToMany := false
|
||||||
|
needsJoinTable := false
|
||||||
|
needsCreateDate := false
|
||||||
|
needsUpdateDate := false
|
||||||
|
needsViewEntity := false
|
||||||
|
|
||||||
|
for _, schema := range db.Schemas {
|
||||||
|
// Check for views
|
||||||
|
if len(schema.Views) > 0 {
|
||||||
|
needsViewEntity = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, table := range schema.Tables {
|
||||||
|
// Check for timestamp columns
|
||||||
|
for _, col := range table.Columns {
|
||||||
|
if col.Default == "now()" {
|
||||||
|
needsCreateDate = true
|
||||||
|
}
|
||||||
|
if strings.Contains(col.Comment, "auto-update") {
|
||||||
|
needsUpdateDate = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for relations
|
||||||
|
for _, constraint := range table.Constraints {
|
||||||
|
if constraint.Type == models.ForeignKeyConstraint {
|
||||||
|
needsManyToOne = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OneToMany is the inverse of ManyToOne
|
||||||
|
if needsManyToOne {
|
||||||
|
needsOneToMany = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for M2M (join tables indicate M2M relations)
|
||||||
|
joinTables := make(map[string]bool)
|
||||||
|
for _, schema := range db.Schemas {
|
||||||
|
jt := w.identifyJoinTables(schema)
|
||||||
|
for name := range jt {
|
||||||
|
joinTables[name] = true
|
||||||
|
needsManyToMany = true
|
||||||
|
needsJoinTable = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if needsManyToOne {
|
||||||
|
imports = append(imports, "ManyToOne")
|
||||||
|
}
|
||||||
|
if needsOneToMany {
|
||||||
|
imports = append(imports, "OneToMany")
|
||||||
|
}
|
||||||
|
if needsManyToMany {
|
||||||
|
imports = append(imports, "ManyToMany")
|
||||||
|
}
|
||||||
|
if needsJoinTable {
|
||||||
|
imports = append(imports, "JoinTable")
|
||||||
|
}
|
||||||
|
if needsCreateDate {
|
||||||
|
imports = append(imports, "CreateDateColumn")
|
||||||
|
}
|
||||||
|
if needsUpdateDate {
|
||||||
|
imports = append(imports, "UpdateDateColumn")
|
||||||
|
}
|
||||||
|
if needsViewEntity {
|
||||||
|
imports = append(imports, "ViewEntity")
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("import { %s } from 'typeorm';\n", strings.Join(imports, ", "))
|
||||||
|
}
|
||||||
|
|
||||||
|
// identifyJoinTables identifies tables that are join tables for M2M relations
|
||||||
|
func (w *Writer) identifyJoinTables(schema *models.Schema) map[string]bool {
|
||||||
|
joinTables := make(map[string]bool)
|
||||||
|
|
||||||
|
for _, table := range schema.Tables {
|
||||||
|
// Check if this is a join table:
|
||||||
|
// 1. Has exactly 2 FK constraints
|
||||||
|
// 2. Has composite PK with those 2 columns
|
||||||
|
// 3. Has no other columns except the FK columns
|
||||||
|
|
||||||
|
fks := table.GetForeignKeys()
|
||||||
|
if len(fks) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if columns are only the FK columns
|
||||||
|
if len(table.Columns) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if both FK columns are part of PK
|
||||||
|
pkCols := 0
|
||||||
|
for _, col := range table.Columns {
|
||||||
|
if col.IsPrimaryKey {
|
||||||
|
pkCols++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if pkCols == 2 {
|
||||||
|
joinTables[table.Name] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return joinTables
|
||||||
|
}
|
||||||
|
|
||||||
|
// tableToEntity converts a Table to a TypeORM entity class
|
||||||
|
func (w *Writer) tableToEntity(table *models.Table, schema *models.Schema, joinTables map[string]bool) string {
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
|
// Generate @Entity decorator with options
|
||||||
|
entityOptions := w.buildEntityOptions(table)
|
||||||
|
sb.WriteString(fmt.Sprintf("@Entity({\n%s\n})\n", entityOptions))
|
||||||
|
|
||||||
|
// Get class name (from metadata if different from table name)
|
||||||
|
className := table.Name
|
||||||
|
if table.Metadata != nil {
|
||||||
|
if classNameVal, ok := table.Metadata["class_name"]; ok {
|
||||||
|
if classNameStr, ok := classNameVal.(string); ok {
|
||||||
|
className = classNameStr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.WriteString(fmt.Sprintf("export class %s {\n", className))
|
||||||
|
|
||||||
|
// Collect and sort columns
|
||||||
|
columns := make([]*models.Column, 0, len(table.Columns))
|
||||||
|
for _, col := range table.Columns {
|
||||||
|
// Skip FK columns (they'll be represented as relations)
|
||||||
|
if w.isForeignKeyColumn(col, table) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
columns = append(columns, col)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Slice(columns, func(i, j int) bool {
|
||||||
|
// Put PK first, then alphabetical
|
||||||
|
if columns[i].IsPrimaryKey && !columns[j].IsPrimaryKey {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if !columns[i].IsPrimaryKey && columns[j].IsPrimaryKey {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return columns[i].Name < columns[j].Name
|
||||||
|
})
|
||||||
|
|
||||||
|
// Write scalar fields
|
||||||
|
for _, col := range columns {
|
||||||
|
sb.WriteString(w.columnToField(col, table))
|
||||||
|
sb.WriteString("\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write relation fields
|
||||||
|
sb.WriteString(w.generateRelationFields(table, schema, joinTables))
|
||||||
|
|
||||||
|
sb.WriteString("}\n")
|
||||||
|
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// viewToEntity converts a View to a TypeORM @ViewEntity class
|
||||||
|
func (w *Writer) viewToEntity(view *models.View) string {
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
|
// Generate @ViewEntity decorator with expression
|
||||||
|
sb.WriteString("@ViewEntity({\n")
|
||||||
|
if view.Definition != "" {
|
||||||
|
// Format the SQL expression with proper indentation
|
||||||
|
sb.WriteString(" expression: `\n")
|
||||||
|
sb.WriteString(" ")
|
||||||
|
sb.WriteString(view.Definition)
|
||||||
|
sb.WriteString("\n `,\n")
|
||||||
|
}
|
||||||
|
sb.WriteString("})\n")
|
||||||
|
|
||||||
|
// Generate class
|
||||||
|
sb.WriteString(fmt.Sprintf("export class %s {\n", view.Name))
|
||||||
|
|
||||||
|
// Generate field definitions (without decorators for view fields)
|
||||||
|
columns := make([]*models.Column, 0, len(view.Columns))
|
||||||
|
for _, col := range view.Columns {
|
||||||
|
columns = append(columns, col)
|
||||||
|
}
|
||||||
|
sort.Slice(columns, func(i, j int) bool {
|
||||||
|
return columns[i].Name < columns[j].Name
|
||||||
|
})
|
||||||
|
|
||||||
|
for _, col := range columns {
|
||||||
|
tsType := w.sqlTypeToTypeScript(col.Type)
|
||||||
|
sb.WriteString(fmt.Sprintf(" %s: %s;\n", col.Name, tsType))
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.WriteString("}\n")
|
||||||
|
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// columnToField converts a Column to a TypeORM field
|
||||||
|
func (w *Writer) columnToField(col *models.Column, table *models.Table) string {
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
|
// Generate decorator
|
||||||
|
if col.IsPrimaryKey {
|
||||||
|
if col.AutoIncrement {
|
||||||
|
sb.WriteString(" @PrimaryGeneratedColumn('increment')\n")
|
||||||
|
} else if col.Type == "uuid" || strings.Contains(fmt.Sprint(col.Default), "uuid") {
|
||||||
|
sb.WriteString(" @PrimaryGeneratedColumn('uuid')\n")
|
||||||
|
} else {
|
||||||
|
sb.WriteString(" @PrimaryGeneratedColumn()\n")
|
||||||
|
}
|
||||||
|
} else if col.Default == "now()" {
|
||||||
|
sb.WriteString(" @CreateDateColumn()\n")
|
||||||
|
} else if strings.Contains(col.Comment, "auto-update") {
|
||||||
|
sb.WriteString(" @UpdateDateColumn()\n")
|
||||||
|
} else {
|
||||||
|
// Regular @Column decorator
|
||||||
|
options := w.buildColumnOptions(col, table)
|
||||||
|
if options != "" {
|
||||||
|
sb.WriteString(fmt.Sprintf(" @Column({ %s })\n", options))
|
||||||
|
} else {
|
||||||
|
sb.WriteString(" @Column()\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate field declaration
|
||||||
|
tsType := w.sqlTypeToTypeScript(col.Type)
|
||||||
|
nullable := ""
|
||||||
|
if !col.NotNull {
|
||||||
|
nullable = " | null"
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.WriteString(fmt.Sprintf(" %s: %s%s;", col.Name, tsType, nullable))
|
||||||
|
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildColumnOptions builds the options object for @Column decorator
|
||||||
|
func (w *Writer) buildColumnOptions(col *models.Column, table *models.Table) string {
|
||||||
|
options := make([]string, 0)
|
||||||
|
|
||||||
|
// Type (if not default)
|
||||||
|
if w.needsExplicitType(col.Type) {
|
||||||
|
options = append(options, fmt.Sprintf("type: '%s'", col.Type))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Nullable
|
||||||
|
if !col.NotNull {
|
||||||
|
options = append(options, "nullable: true")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique
|
||||||
|
if w.hasUniqueConstraint(col.Name, table) {
|
||||||
|
options = append(options, "unique: true")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default
|
||||||
|
if col.Default != nil && col.Default != "now()" {
|
||||||
|
defaultStr := fmt.Sprint(col.Default)
|
||||||
|
if defaultStr != "" {
|
||||||
|
options = append(options, fmt.Sprintf("default: '%s'", defaultStr))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(options, ", ")
|
||||||
|
}
|
||||||
|
|
||||||
|
// needsExplicitType checks if a SQL type needs explicit type declaration
|
||||||
|
func (w *Writer) needsExplicitType(sqlType string) bool {
|
||||||
|
// Types that don't map cleanly to TypeScript types need explicit declaration
|
||||||
|
explicitTypes := []string{"text", "uuid", "jsonb", "bigint"}
|
||||||
|
for _, t := range explicitTypes {
|
||||||
|
if strings.Contains(sqlType, t) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasUniqueConstraint checks if a column has a unique constraint
|
||||||
|
func (w *Writer) hasUniqueConstraint(colName string, table *models.Table) bool {
|
||||||
|
for _, constraint := range table.Constraints {
|
||||||
|
if constraint.Type == models.UniqueConstraint &&
|
||||||
|
len(constraint.Columns) == 1 &&
|
||||||
|
constraint.Columns[0] == colName {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// sqlTypeToTypeScript converts SQL types to TypeScript types
|
||||||
|
func (w *Writer) sqlTypeToTypeScript(sqlType string) string {
|
||||||
|
typeMap := map[string]string{
|
||||||
|
"text": "string",
|
||||||
|
"varchar": "string",
|
||||||
|
"character varying": "string",
|
||||||
|
"char": "string",
|
||||||
|
"uuid": "string",
|
||||||
|
"boolean": "boolean",
|
||||||
|
"bool": "boolean",
|
||||||
|
"integer": "number",
|
||||||
|
"int": "number",
|
||||||
|
"bigint": "number",
|
||||||
|
"double precision": "number",
|
||||||
|
"float": "number",
|
||||||
|
"decimal": "number",
|
||||||
|
"numeric": "number",
|
||||||
|
"timestamp": "Date",
|
||||||
|
"timestamptz": "Date",
|
||||||
|
"date": "Date",
|
||||||
|
"jsonb": "any",
|
||||||
|
"json": "any",
|
||||||
|
}
|
||||||
|
|
||||||
|
for sqlPattern, tsType := range typeMap {
|
||||||
|
if strings.Contains(strings.ToLower(sqlType), sqlPattern) {
|
||||||
|
return tsType
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return "any"
|
||||||
|
}
|
||||||
|
|
||||||
|
// isForeignKeyColumn checks if a column is a FK column
|
||||||
|
func (w *Writer) isForeignKeyColumn(col *models.Column, table *models.Table) bool {
|
||||||
|
for _, constraint := range table.Constraints {
|
||||||
|
if constraint.Type == models.ForeignKeyConstraint {
|
||||||
|
for _, fkCol := range constraint.Columns {
|
||||||
|
if fkCol == col.Name {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateRelationFields generates relation fields for a table
|
||||||
|
func (w *Writer) generateRelationFields(table *models.Table, schema *models.Schema, joinTables map[string]bool) string {
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
|
// Get all FK constraints
|
||||||
|
fks := table.GetForeignKeys()
|
||||||
|
|
||||||
|
// Generate @ManyToOne fields
|
||||||
|
for _, fk := range fks {
|
||||||
|
relatedTable := fk.ReferencedTable
|
||||||
|
fieldName := strings.ToLower(relatedTable)
|
||||||
|
|
||||||
|
// Determine if nullable
|
||||||
|
isNullable := false
|
||||||
|
for _, fkCol := range fk.Columns {
|
||||||
|
if col, exists := table.Columns[fkCol]; exists {
|
||||||
|
if !col.NotNull {
|
||||||
|
isNullable = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
nullable := ""
|
||||||
|
if isNullable {
|
||||||
|
nullable = " | null"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find inverse field name if possible
|
||||||
|
inverseField := w.findInverseFieldName(table.Name, relatedTable, schema)
|
||||||
|
|
||||||
|
if inverseField != "" {
|
||||||
|
sb.WriteString(fmt.Sprintf(" @ManyToOne(() => %s, %s => %s.%s)\n",
|
||||||
|
relatedTable, strings.ToLower(relatedTable), strings.ToLower(relatedTable), inverseField))
|
||||||
|
} else {
|
||||||
|
if isNullable {
|
||||||
|
sb.WriteString(fmt.Sprintf(" @ManyToOne(() => %s, { nullable: true })\n", relatedTable))
|
||||||
|
} else {
|
||||||
|
sb.WriteString(fmt.Sprintf(" @ManyToOne(() => %s)\n", relatedTable))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.WriteString(fmt.Sprintf(" %s: %s%s;\n", fieldName, relatedTable, nullable))
|
||||||
|
sb.WriteString("\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate @OneToMany fields (inverse of FKs pointing to this table)
|
||||||
|
w.generateInverseRelations(table, schema, joinTables, &sb)
|
||||||
|
|
||||||
|
// Generate @ManyToMany fields
|
||||||
|
w.generateManyToManyRelations(table, schema, joinTables, &sb)
|
||||||
|
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// findInverseFieldName finds the inverse field name for a relation
|
||||||
|
func (w *Writer) findInverseFieldName(fromTable, toTable string, schema *models.Schema) string {
|
||||||
|
// Look for tables that have FKs pointing back to fromTable
|
||||||
|
for _, table := range schema.Tables {
|
||||||
|
if table.Name != toTable {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, constraint := range table.Constraints {
|
||||||
|
if constraint.Type == models.ForeignKeyConstraint && constraint.ReferencedTable == fromTable {
|
||||||
|
// Found an inverse relation
|
||||||
|
// Use pluralized form of fromTable
|
||||||
|
return w.pluralize(strings.ToLower(fromTable))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateInverseRelations generates @OneToMany fields
|
||||||
|
func (w *Writer) generateInverseRelations(table *models.Table, schema *models.Schema, joinTables map[string]bool, sb *strings.Builder) {
|
||||||
|
for _, otherTable := range schema.Tables {
|
||||||
|
if otherTable.Name == table.Name || joinTables[otherTable.Name] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fk := range otherTable.GetForeignKeys() {
|
||||||
|
if fk.ReferencedTable == table.Name {
|
||||||
|
// This table is referenced by otherTable
|
||||||
|
fieldName := w.pluralize(strings.ToLower(otherTable.Name))
|
||||||
|
inverseName := strings.ToLower(table.Name)
|
||||||
|
|
||||||
|
fmt.Fprintf(sb, " @OneToMany(() => %s, %s => %s.%s)\n",
|
||||||
|
otherTable.Name, strings.ToLower(otherTable.Name), strings.ToLower(otherTable.Name), inverseName)
|
||||||
|
fmt.Fprintf(sb, " %s: %s[];\n", fieldName, otherTable.Name)
|
||||||
|
sb.WriteString("\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateManyToManyRelations generates @ManyToMany fields
|
||||||
|
func (w *Writer) generateManyToManyRelations(table *models.Table, schema *models.Schema, joinTables map[string]bool, sb *strings.Builder) {
|
||||||
|
for joinTableName := range joinTables {
|
||||||
|
joinTable := w.findTable(joinTableName, schema)
|
||||||
|
if joinTable == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
fks := joinTable.GetForeignKeys()
|
||||||
|
if len(fks) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if this table is part of the M2M relation
|
||||||
|
var thisTableFK *models.Constraint
|
||||||
|
var otherTableFK *models.Constraint
|
||||||
|
|
||||||
|
for i, fk := range fks {
|
||||||
|
if fk.ReferencedTable == table.Name {
|
||||||
|
thisTableFK = fk
|
||||||
|
if i == 0 {
|
||||||
|
otherTableFK = fks[1]
|
||||||
|
} else {
|
||||||
|
otherTableFK = fks[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if thisTableFK == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine which side owns the relation (has @JoinTable)
|
||||||
|
// We'll make the first entity alphabetically the owner
|
||||||
|
isOwner := table.Name < otherTableFK.ReferencedTable
|
||||||
|
|
||||||
|
otherTable := otherTableFK.ReferencedTable
|
||||||
|
fieldName := w.pluralize(strings.ToLower(otherTable))
|
||||||
|
inverseName := w.pluralize(strings.ToLower(table.Name))
|
||||||
|
|
||||||
|
if isOwner {
|
||||||
|
fmt.Fprintf(sb, " @ManyToMany(() => %s, %s => %s.%s)\n",
|
||||||
|
otherTable, strings.ToLower(otherTable), strings.ToLower(otherTable), inverseName)
|
||||||
|
sb.WriteString(" @JoinTable()\n")
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(sb, " @ManyToMany(() => %s, %s => %s.%s)\n",
|
||||||
|
otherTable, strings.ToLower(otherTable), strings.ToLower(otherTable), inverseName)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(sb, " %s: %s[];\n", fieldName, otherTable)
|
||||||
|
sb.WriteString("\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// findTable finds a table by name in a schema
|
||||||
|
func (w *Writer) findTable(name string, schema *models.Schema) *models.Table {
|
||||||
|
for _, table := range schema.Tables {
|
||||||
|
if table.Name == name {
|
||||||
|
return table
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildEntityOptions builds the options object for @Entity decorator
|
||||||
|
func (w *Writer) buildEntityOptions(table *models.Table) string {
|
||||||
|
options := make([]string, 0)
|
||||||
|
|
||||||
|
// Always include table name
|
||||||
|
options = append(options, fmt.Sprintf(" name: \"%s\"", table.Name))
|
||||||
|
|
||||||
|
// Always include schema
|
||||||
|
options = append(options, fmt.Sprintf(" schema: \"%s\"", table.Schema))
|
||||||
|
|
||||||
|
// Database name from metadata
|
||||||
|
if table.Metadata != nil {
|
||||||
|
if database, ok := table.Metadata["database"]; ok {
|
||||||
|
if databaseStr, ok := database.(string); ok {
|
||||||
|
options = append(options, fmt.Sprintf(" database: \"%s\"", databaseStr))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Engine from metadata
|
||||||
|
if engine, ok := table.Metadata["engine"]; ok {
|
||||||
|
if engineStr, ok := engine.(string); ok {
|
||||||
|
options = append(options, fmt.Sprintf(" engine: \"%s\"", engineStr))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(options, ",\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// pluralize adds 's' to make a word plural (simple version)
|
||||||
|
func (w *Writer) pluralize(word string) string {
|
||||||
|
if strings.HasSuffix(word, "s") {
|
||||||
|
return word
|
||||||
|
}
|
||||||
|
return word + "s"
|
||||||
|
}
|
||||||
212 pkg/writers/yaml/README.md Normal file
@@ -0,0 +1,212 @@
|
|||||||
|
# YAML Writer
|
||||||
|
|
||||||
|
Generates database schema definitions in YAML format.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The YAML Writer converts RelSpec's internal database model representation into YAML format, providing a human-readable, structured representation of the database schema.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Generates RelSpec's canonical YAML schema format
|
||||||
|
- Human-readable alternative to JSON
|
||||||
|
- Complete schema representation including:
|
||||||
|
- Databases and schemas
|
||||||
|
- Tables, columns, and data types
|
||||||
|
- Constraints (PK, FK, unique, check)
|
||||||
|
- Indexes
|
||||||
|
- Relationships
|
||||||
|
- Views and sequences
|
||||||
|
- Supports comments
|
||||||
|
- Ideal for manual editing and configuration
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||||
|
"git.warky.dev/wdevs/relspecgo/pkg/writers/yaml"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
options := &writers.WriterOptions{
|
||||||
|
OutputPath: "schema.yaml",
|
||||||
|
}
|
||||||
|
|
||||||
|
writer := yaml.NewWriter(options)
|
||||||
|
err := writer.WriteDatabase(db)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Examples
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Export PostgreSQL database to YAML
|
||||||
|
relspec --input pgsql \
|
||||||
|
--conn "postgres://localhost/mydb" \
|
||||||
|
--output yaml \
|
||||||
|
--out-file schema.yaml
|
||||||
|
|
||||||
|
# Convert GORM models to YAML
|
||||||
|
relspec --input gorm --in-file models.go --output yaml --out-file schema.yaml
|
||||||
|
|
||||||
|
# Convert JSON to YAML
|
||||||
|
relspec --input json --in-file schema.json --output yaml --out-file schema.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
## Generated YAML Example
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
name: myapp
|
||||||
|
database_type: postgresql
|
||||||
|
source_format: pgsql
|
||||||
|
schemas:
|
||||||
|
- name: public
|
||||||
|
tables:
|
||||||
|
- name: users
|
||||||
|
schema: public
|
||||||
|
columns:
|
||||||
|
id:
|
||||||
|
name: id
|
||||||
|
table: users
|
||||||
|
schema: public
|
||||||
|
type: bigint
|
||||||
|
not_null: true
|
||||||
|
is_primary_key: true
|
||||||
|
auto_increment: true
|
||||||
|
sequence: 1
|
||||||
|
username:
|
||||||
|
name: username
|
||||||
|
table: users
|
||||||
|
schema: public
|
||||||
|
type: varchar
|
||||||
|
length: 50
|
||||||
|
not_null: true
|
||||||
|
sequence: 2
|
||||||
|
email:
|
||||||
|
name: email
|
||||||
|
table: users
|
||||||
|
schema: public
|
||||||
|
type: varchar
|
||||||
|
length: 100
|
||||||
|
not_null: true
|
||||||
|
sequence: 3
|
||||||
|
constraints:
|
||||||
|
pk_users:
|
||||||
|
name: pk_users
|
||||||
|
type: PRIMARY KEY
|
||||||
|
table: users
|
||||||
|
schema: public
|
||||||
|
columns:
|
||||||
|
- id
|
||||||
|
uq_users_username:
|
||||||
|
name: uq_users_username
|
||||||
|
type: UNIQUE
|
||||||
|
table: users
|
||||||
|
schema: public
|
||||||
|
columns:
|
||||||
|
- username
|
||||||
|
indexes:
|
||||||
|
idx_users_email:
|
||||||
|
name: idx_users_email
|
||||||
|
table: users
|
||||||
|
schema: public
|
||||||
|
columns:
|
||||||
|
- email
|
||||||
|
unique: false
|
||||||
|
type: btree
|
||||||
|
|
||||||
|
- name: posts
|
||||||
|
schema: public
|
||||||
|
columns:
|
||||||
|
id:
|
||||||
|
name: id
|
||||||
|
type: bigint
|
||||||
|
not_null: true
|
||||||
|
is_primary_key: true
|
||||||
|
sequence: 1
|
||||||
|
user_id:
|
||||||
|
name: user_id
|
||||||
|
type: bigint
|
||||||
|
not_null: true
|
||||||
|
sequence: 2
|
||||||
|
title:
|
||||||
|
name: title
|
||||||
|
type: varchar
|
||||||
|
length: 200
|
||||||
|
not_null: true
|
||||||
|
sequence: 3
|
||||||
|
content:
|
||||||
|
name: content
|
||||||
|
type: text
|
||||||
|
not_null: false
|
||||||
|
sequence: 4
|
||||||
|
constraints:
|
||||||
|
fk_posts_user_id:
|
||||||
|
name: fk_posts_user_id
|
||||||
|
type: FOREIGN KEY
|
||||||
|
table: posts
|
||||||
|
schema: public
|
||||||
|
columns:
|
||||||
|
- user_id
|
||||||
|
referenced_table: users
|
||||||
|
referenced_schema: public
|
||||||
|
referenced_columns:
|
||||||
|
- id
|
||||||
|
on_delete: CASCADE
|
||||||
|
on_update: NO ACTION
|
||||||
|
indexes:
|
||||||
|
idx_posts_user_id:
|
||||||
|
name: idx_posts_user_id
|
||||||
|
columns:
|
||||||
|
- user_id
|
||||||
|
unique: false
|
||||||
|
type: btree
|
||||||
|
views: []
|
||||||
|
sequences: []
|
||||||
|
```
|
||||||
|
|
||||||
|
## Schema Structure

The YAML format mirrors the JSON structure with human-readable syntax; the levels are summarized here, with a small reading sketch after the list:

- Database level: `name`, `database_type`, `source_format`, `schemas`
- Schema level: `name`, `tables`, `views`, `sequences`
- Table level: `name`, `schema`, `columns`, `constraints`, `indexes`
- Column level: `name`, `type`, `length`, `not_null`, etc.
- Constraint level: `name`, `type`, `columns`, foreign key details
- Index level: `name`, `columns`, `unique`, `type`
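To illustrate the nesting, here is a minimal, hypothetical sketch (not part of RelSpec's own API) that loads a generated `schema.yaml` with `gopkg.in/yaml.v3` and prints the table names of the first schema; the field tags follow the levels listed above:

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v3"
)

func main() {
	data, err := os.ReadFile("schema.yaml")
	if err != nil {
		panic(err)
	}

	// Minimal view of the document: database -> schemas -> tables.
	var doc struct {
		Name    string `yaml:"name"`
		Schemas []struct {
			Name   string `yaml:"name"`
			Tables []struct {
				Name string `yaml:"name"`
			} `yaml:"tables"`
		} `yaml:"schemas"`
	}
	if err := yaml.Unmarshal(data, &doc); err != nil {
		panic(err)
	}

	for _, tbl := range doc.Schemas[0].Tables {
		fmt.Printf("%s.%s\n", doc.Schemas[0].Name, tbl.Name)
	}
}
```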
## Advantages Over JSON
|
||||||
|
|
||||||
|
- More human-readable
|
||||||
|
- Easier to edit manually
|
||||||
|
- Supports comments
|
||||||
|
- Less verbose (no braces/brackets)
|
||||||
|
- Better for configuration files
|
||||||
|
- Natural indentation
|
||||||
|
|
||||||
|
## Use Cases
|
||||||
|
|
||||||
|
- **Configuration** - Schema as configuration
|
||||||
|
- **Documentation** - Human-readable schema docs
|
||||||
|
- **Version Control** - Easier to read diffs
|
||||||
|
- **Manual Editing** - Easier to modify by hand
|
||||||
|
- **Code Generation** - Template-friendly format
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- Output is properly indented (2 spaces)
|
||||||
|
- Preserves all schema metadata
|
||||||
|
- Can be round-tripped with YAML reader
|
||||||
|
- Compatible with YAML 1.2
|
||||||
|
- More readable than JSON for large schemas
|
||||||
|
- Ideal for documentation and manual workflows
|
||||||
60 tests/assets/bun/complex.go Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/uptrace/bun"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ModelUser represents a user in the system
|
||||||
|
type ModelUser struct {
|
||||||
|
bun.BaseModel `bun:"table:users,alias:u"`
|
||||||
|
|
||||||
|
ID int64 `bun:"id,pk,autoincrement,type:bigint"`
|
||||||
|
Username string `bun:"username,notnull,type:varchar(100),unique:idx_username"`
|
||||||
|
Email string `bun:"email,notnull,type:varchar(255),unique"`
|
||||||
|
Password string `bun:"password,notnull,type:varchar(255)"`
|
||||||
|
FirstName *string `bun:"first_name,type:varchar(100)"`
|
||||||
|
LastName *string `bun:"last_name,type:varchar(100)"`
|
||||||
|
Bio *string `bun:"bio,type:text"`
|
||||||
|
IsActive bool `bun:"is_active,type:boolean"`
|
||||||
|
CreatedAt time.Time `bun:"created_at,type:timestamp"`
|
||||||
|
UpdatedAt time.Time `bun:"updated_at,type:timestamp"`
|
||||||
|
|
||||||
|
Posts []*ModelPost `bun:"rel:has-many,join:id=user_id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelPost represents a blog post
|
||||||
|
type ModelPost struct {
|
||||||
|
bun.BaseModel `bun:"table:posts,alias:p"`
|
||||||
|
|
||||||
|
ID int64 `bun:"id,pk,autoincrement,type:bigint"`
|
||||||
|
UserID int64 `bun:"user_id,notnull,type:bigint"`
|
||||||
|
Title string `bun:"title,notnull,type:varchar(255)"`
|
||||||
|
Slug string `bun:"slug,notnull,type:varchar(255),unique:idx_slug"`
|
||||||
|
Content string `bun:"content,notnull,type:text"`
|
||||||
|
Excerpt *string `bun:"excerpt,type:text"`
|
||||||
|
Published bool `bun:"published,type:boolean"`
|
||||||
|
ViewCount int64 `bun:"view_count,type:bigint"`
|
||||||
|
PublishedAt *time.Time `bun:"published_at,type:timestamp,nullzero"`
|
||||||
|
CreatedAt time.Time `bun:"created_at,type:timestamp"`
|
||||||
|
UpdatedAt time.Time `bun:"updated_at,type:timestamp"`
|
||||||
|
|
||||||
|
User *ModelUser `bun:"rel:belongs-to,join:user_id=id"`
|
||||||
|
Comments []*ModelComment `bun:"rel:has-many,join:id=post_id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelComment represents a comment on a post
|
||||||
|
type ModelComment struct {
|
||||||
|
bun.BaseModel `bun:"table:comments,alias:c"`
|
||||||
|
|
||||||
|
ID int64 `bun:"id,pk,autoincrement,type:bigint"`
|
||||||
|
PostID int64 `bun:"post_id,notnull,type:bigint"`
|
||||||
|
UserID *int64 `bun:"user_id,type:bigint"`
|
||||||
|
Content string `bun:"content,notnull,type:text"`
|
||||||
|
CreatedAt time.Time `bun:"created_at,type:timestamp"`
|
||||||
|
UpdatedAt time.Time `bun:"updated_at,type:timestamp"`
|
||||||
|
|
||||||
|
Post *ModelPost `bun:"rel:belongs-to,join:post_id=id"`
|
||||||
|
User *ModelUser `bun:"rel:belongs-to,join:user_id=id"`
|
||||||
|
}
|
||||||
18 tests/assets/bun/simple.go Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/uptrace/bun"
|
||||||
|
)
|
||||||
|
|
||||||
|
type User struct {
|
||||||
|
bun.BaseModel `bun:"table:users,alias:u"`
|
||||||
|
|
||||||
|
ID int64 `bun:"id,pk,autoincrement,type:bigint"`
|
||||||
|
Email string `bun:"email,notnull,type:varchar(255),unique"`
|
||||||
|
Name string `bun:"name,type:text"`
|
||||||
|
Age *int `bun:"age,type:integer"`
|
||||||
|
IsActive bool `bun:"is_active,type:boolean"`
|
||||||
|
CreatedAt time.Time `bun:"created_at,type:timestamp,default:now()"`
|
||||||
|
}
|
||||||
156 tests/assets/drizzle/schema-updated.ts Normal file
@@ -0,0 +1,156 @@
|
|||||||
|
// Code generated by relspecgo. DO NOT EDIT.
|
||||||
|
import { pgTable, pgEnum, integer, bigint, smallint, serial, bigserial, smallserial, text, varchar, char, boolean, numeric, real, doublePrecision, timestamp, date, time, interval, json, jsonb, uuid, bytea } from 'drizzle-orm/pg-core';
|
||||||
|
import { sql } from 'drizzle-orm';
|
||||||
|
|
||||||
|
|
||||||
|
// Enums
|
||||||
|
export const userRole = pgEnum('UserRole', ['admin', 'user', 'moderator', 'guest']);
|
||||||
|
export const orderStatus = pgEnum('OrderStatus', ['pending', 'processing', 'shipped', 'delivered', 'cancelled']);
|
||||||
|
|
||||||
|
|
||||||
|
// Table: users
|
||||||
|
export const users = pgTable('users', {
|
||||||
|
id: serial('id').primaryKey(),
|
||||||
|
createdAt: timestamp('created_at').notNull().default(sql`now()`),
|
||||||
|
email: varchar('email').notNull().unique(),
|
||||||
|
isActive: boolean('is_active').notNull().default(true),
|
||||||
|
lastLoginAt: timestamp('last_login_at'),
|
||||||
|
passwordHash: varchar('password_hash').notNull(),
|
||||||
|
profile: jsonb('profile'),
|
||||||
|
role: pgEnum('UserRole')('role').notNull(),
|
||||||
|
updatedAt: timestamp('updated_at').notNull().default(sql`now()`),
|
||||||
|
username: varchar('username').notNull().unique(),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Types for users
|
||||||
|
export type Users = typeof users.$inferSelect;
|
||||||
|
export type NewUsers = typeof users.$inferInsert;
|
||||||
|
// Table: profiles
|
||||||
|
export const profiles = pgTable('profiles', {
|
||||||
|
id: serial('id').primaryKey(),
|
||||||
|
avatarUrl: varchar('avatar_url'),
|
||||||
|
bio: text('bio'),
|
||||||
|
createdAt: timestamp('created_at').notNull().default(sql`now()`),
|
||||||
|
dateOfBirth: date('date_of_birth'),
|
||||||
|
firstName: varchar('first_name'),
|
||||||
|
lastName: varchar('last_name'),
|
||||||
|
phoneNumber: varchar('phone_number'),
|
||||||
|
updatedAt: timestamp('updated_at').notNull().default(sql`now()`),
|
||||||
|
userId: integer('user_id').notNull().unique().references(() => users.id),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Types for profiles
|
||||||
|
export type Profiles = typeof profiles.$inferSelect;
|
||||||
|
export type NewProfiles = typeof profiles.$inferInsert;
|
||||||
|
// Table: posts
|
||||||
|
export const posts = pgTable('posts', {
|
||||||
|
id: serial('id').primaryKey(),
|
||||||
|
authorId: integer('author_id').notNull().references(() => users.id),
|
||||||
|
content: text('content').notNull(),
|
||||||
|
createdAt: timestamp('created_at').notNull().default(sql`now()`),
|
||||||
|
excerpt: text('excerpt'),
|
||||||
|
featuredImage: varchar('featured_image'),
|
||||||
|
isPublished: boolean('is_published').notNull().default(false),
|
||||||
|
publishedAt: timestamp('published_at'),
|
||||||
|
slug: varchar('slug').notNull().unique(),
|
||||||
|
title: varchar('title').notNull(),
|
||||||
|
updatedAt: timestamp('updated_at').notNull().default(sql`now()`),
|
||||||
|
viewCount: integer('view_count').notNull().default(0),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Types for posts
|
||||||
|
export type Posts = typeof posts.$inferSelect;
|
||||||
|
export type NewPosts = typeof posts.$inferInsert;
|
||||||
|
// Table: comments
|
||||||
|
export const comments = pgTable('comments', {
|
||||||
|
id: serial('id').primaryKey(),
|
||||||
|
authorId: integer('author_id').notNull().references(() => users.id),
|
||||||
|
content: text('content').notNull(),
|
||||||
|
createdAt: timestamp('created_at').notNull().default(sql`now()`),
|
||||||
|
isApproved: boolean('is_approved').notNull().default(false),
|
||||||
|
parentId: integer('parent_id').references(() => comments.id),
|
||||||
|
postId: integer('post_id').notNull().references(() => posts.id),
|
||||||
|
updatedAt: timestamp('updated_at').notNull().default(sql`now()`),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Types for comments
|
||||||
|
export type Comments = typeof comments.$inferSelect;
|
||||||
|
export type NewComments = typeof comments.$inferInsert;
|
||||||
|
// Table: categories
|
||||||
|
export const categories = pgTable('categories', {
|
||||||
|
id: serial('id').primaryKey(),
|
||||||
|
createdAt: timestamp('created_at').notNull().default(sql`now()`),
|
||||||
|
description: text('description'),
|
||||||
|
name: varchar('name').notNull().unique(),
|
||||||
|
parentId: integer('parent_id').references(() => categories.id),
|
||||||
|
slug: varchar('slug').notNull().unique(),
|
||||||
|
updatedAt: timestamp('updated_at').notNull().default(sql`now()`),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Types for categories
|
||||||
|
export type Categories = typeof categories.$inferSelect;
|
||||||
|
export type NewCategories = typeof categories.$inferInsert;
|
||||||
|
// Table: post_categories
|
||||||
|
export const postCategories = pgTable('post_categories', {
|
||||||
|
categoryId: integer('category_id').notNull().references(() => categories.id),
|
||||||
|
createdAt: timestamp('created_at').notNull().default(sql`now()`),
|
||||||
|
postId: integer('post_id').notNull().references(() => posts.id),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Types for post_categories
|
||||||
|
export type PostCategories = typeof postCategories.$inferSelect;
|
||||||
|
export type NewPostCategories = typeof postCategories.$inferInsert;
|
||||||
|
// Table: tags
|
||||||
|
export const tags = pgTable('tags', {
|
||||||
|
id: serial('id').primaryKey(),
|
||||||
|
createdAt: timestamp('created_at').notNull().default(sql`now()`),
|
||||||
|
name: varchar('name').notNull().unique(),
|
||||||
|
slug: varchar('slug').notNull().unique(),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Types for tags
|
||||||
|
export type Tags = typeof tags.$inferSelect;
|
||||||
|
export type NewTags = typeof tags.$inferInsert;
|
||||||
|
// Table: post_tags
|
||||||
|
export const postTags = pgTable('post_tags', {
|
||||||
|
createdAt: timestamp('created_at').notNull().default(sql`now()`),
|
||||||
|
postId: integer('post_id').notNull().references(() => posts.id),
|
||||||
|
tagId: integer('tag_id').notNull().references(() => tags.id),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Types for post_tags
|
||||||
|
export type PostTags = typeof postTags.$inferSelect;
|
||||||
|
export type NewPostTags = typeof postTags.$inferInsert;
|
||||||
|
// Table: orders
|
||||||
|
export const orders = pgTable('orders', {
|
||||||
|
id: serial('id').primaryKey(),
|
||||||
|
billingAddress: jsonb('billing_address').notNull(),
|
||||||
|
completedAt: timestamp('completed_at'),
|
||||||
|
createdAt: timestamp('created_at').notNull().default(sql`now()`),
|
||||||
|
currency: varchar('currency').notNull().default('USD'),
|
||||||
|
notes: text('notes'),
|
||||||
|
orderNumber: varchar('order_number').notNull().unique(),
|
||||||
|
shippingAddress: jsonb('shipping_address').notNull(),
|
||||||
|
status: pgEnum('OrderStatus')('status').notNull().default('pending'),
|
||||||
|
totalAmount: numeric('total_amount').notNull(),
|
||||||
|
updatedAt: timestamp('updated_at').notNull().default(sql`now()`),
|
||||||
|
userId: integer('user_id').notNull().references(() => users.id),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Types for orders
|
||||||
|
export type Orders = typeof orders.$inferSelect;
|
||||||
|
export type NewOrders = typeof orders.$inferInsert;
|
||||||
|
// Table: sessions
|
||||||
|
export const sessions = pgTable('sessions', {
|
||||||
|
id: uuid('id').primaryKey().default(sql`gen_random_uuid()`),
|
||||||
|
createdAt: timestamp('created_at').notNull().default(sql`now()`),
|
||||||
|
expiresAt: timestamp('expires_at').notNull(),
|
||||||
|
ipAddress: varchar('ip_address'),
|
||||||
|
token: varchar('token').notNull().unique(),
|
||||||
|
userAgent: text('user_agent'),
|
||||||
|
userId: integer('user_id').notNull().references(() => users.id),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Types for sessions
|
||||||
|
export type Sessions = typeof sessions.$inferSelect;
|
||||||
|
export type NewSessions = typeof sessions.$inferInsert;
|
||||||
90 tests/assets/drizzle/schema.ts Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
// Code generated by relspecgo. DO NOT EDIT.
|
||||||
|
import { pgTable, pgEnum, integer, bigint, smallint, serial, bigserial, smallserial, text, varchar, char, boolean, numeric, real, doublePrecision, timestamp, date, time, interval, json, jsonb, uuid, bytea } from 'drizzle-orm/pg-core';
|
||||||
|
import { sql } from 'drizzle-orm';
|
||||||
|
|
||||||
|
|
||||||
|
// Enums
|
||||||
|
export const role = pgEnum('Role', ['USER', 'ADMIN']);
|
||||||
|
export type Role = 'USER' | 'ADMIN';
|
||||||
|
|
||||||
|
|
||||||
|
// Table: User
|
||||||
|
export interface User {
|
||||||
|
id: number;
|
||||||
|
email: string;
|
||||||
|
name: string | null;
|
||||||
|
profile: string | null;
|
||||||
|
role: Role;
|
||||||
|
}
|
||||||
|
|
||||||
|
export const user = pgTable('User', {
|
||||||
|
id: integer('id').primaryKey().generatedAlwaysAsIdentity(),
|
||||||
|
email: text('email').notNull().unique(),
|
||||||
|
name: text('name'),
|
||||||
|
profile: text('profile'),
|
||||||
|
role: pgEnum('Role')('role').notNull().default('USER'),
|
||||||
|
});
|
||||||
|
|
||||||
|
export type NewUser = typeof user.$inferInsert;
|
||||||
|
// Table: Profile
|
||||||
|
export interface Profile {
|
||||||
|
id: number;
|
||||||
|
bio: string;
|
||||||
|
user: string;
|
||||||
|
userId: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export const profile = pgTable('Profile', {
|
||||||
|
id: integer('id').primaryKey().generatedAlwaysAsIdentity(),
|
||||||
|
bio: text('bio').notNull(),
|
||||||
|
user: text('user').notNull(),
|
||||||
|
userId: integer('userId').notNull().unique().references(() => user.id),
|
||||||
|
});
|
||||||
|
|
||||||
|
export type NewProfile = typeof profile.$inferInsert;
|
||||||
|
// Table: Post
|
||||||
|
export interface Post {
|
||||||
|
id: number;
|
||||||
|
author: string;
|
||||||
|
authorId: number;
|
||||||
|
createdAt: Date;
|
||||||
|
published: boolean;
|
||||||
|
title: string;
|
||||||
|
updatedAt: Date; // @updatedAt
|
||||||
|
}
|
||||||
|
|
||||||
|
export const post = pgTable('Post', {
|
||||||
|
id: integer('id').primaryKey().generatedAlwaysAsIdentity(),
|
||||||
|
author: text('author').notNull(),
|
||||||
|
authorId: integer('authorId').notNull().references(() => user.id),
|
||||||
|
createdAt: timestamp('createdAt').notNull().default(sql`now()`),
|
||||||
|
published: boolean('published').notNull().default(false),
|
||||||
|
title: text('title').notNull(),
|
||||||
|
updatedAt: timestamp('updatedAt').notNull(), // @updatedAt
|
||||||
|
});
|
||||||
|
|
||||||
|
export type NewPost = typeof post.$inferInsert;
|
||||||
|
// Table: Category
|
||||||
|
export interface Category {
|
||||||
|
id: number;
|
||||||
|
name: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export const category = pgTable('Category', {
|
||||||
|
id: integer('id').primaryKey().generatedAlwaysAsIdentity(),
|
||||||
|
name: text('name').notNull(),
|
||||||
|
});
|
||||||
|
|
||||||
|
export type NewCategory = typeof category.$inferInsert;
|
||||||
|
// Table: _CategoryToPost
|
||||||
|
export interface Categorytopost {
|
||||||
|
categoryId: number;
|
||||||
|
postId: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export const Categorytopost = pgTable('_CategoryToPost', {
|
||||||
|
categoryId: integer('CategoryId').primaryKey().references(() => category.id),
|
||||||
|
postId: integer('PostId').primaryKey().references(() => post.id),
|
||||||
|
});
|
||||||
|
|
||||||
|
export type NewCategorytopost = typeof Categorytopost.$inferInsert;
|
||||||
65 tests/assets/gorm/complex.go Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ModelUser represents a user in the system
|
||||||
|
type ModelUser struct {
|
||||||
|
ID int64 `gorm:"column:id;primaryKey;autoIncrement;type:bigint"`
|
||||||
|
Username string `gorm:"column:username;type:varchar(100);not null;uniqueIndex:idx_username"`
|
||||||
|
Email string `gorm:"column:email;type:varchar(255);not null;uniqueIndex"`
|
||||||
|
Password string `gorm:"column:password;type:varchar(255);not null"`
|
||||||
|
FirstName *string `gorm:"column:first_name;type:varchar(100)"`
|
||||||
|
LastName *string `gorm:"column:last_name;type:varchar(100)"`
|
||||||
|
Bio *string `gorm:"column:bio;type:text"`
|
||||||
|
IsActive bool `gorm:"column:is_active;type:boolean;default:true"`
|
||||||
|
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;default:now()"`
|
||||||
|
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;default:now()"`
|
||||||
|
|
||||||
|
Posts []*ModelPost `gorm:"foreignKey:UserID;association_foreignkey:ID;constraint:OnDelete:CASCADE,OnUpdate:CASCADE"`
|
||||||
|
Comments []*ModelComment `gorm:"foreignKey:UserID;association_foreignkey:ID;constraint:OnDelete:SET NULL"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ModelUser) TableName() string {
|
||||||
|
return "users"
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelPost represents a blog post
|
||||||
|
type ModelPost struct {
|
||||||
|
ID int64 `gorm:"column:id;primaryKey;autoIncrement;type:bigint"`
|
||||||
|
UserID int64 `gorm:"column:user_id;type:bigint;not null;index:idx_user_id"`
|
||||||
|
Title string `gorm:"column:title;type:varchar(255);not null"`
|
||||||
|
Slug string `gorm:"column:slug;type:varchar(255);not null;uniqueIndex:idx_slug"`
|
||||||
|
Content string `gorm:"column:content;type:text;not null"`
|
||||||
|
Excerpt *string `gorm:"column:excerpt;type:text"`
|
||||||
|
Published bool `gorm:"column:published;type:boolean;default:false"`
|
||||||
|
ViewCount int64 `gorm:"column:view_count;type:bigint;default:0"`
|
||||||
|
PublishedAt *time.Time `gorm:"column:published_at;type:timestamp"`
|
||||||
|
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;default:now()"`
|
||||||
|
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;default:now()"`
|
||||||
|
|
||||||
|
User *ModelUser `gorm:"foreignKey:UserID;references:ID;constraint:OnDelete:CASCADE,OnUpdate:CASCADE"`
|
||||||
|
Comments []*ModelComment `gorm:"foreignKey:PostID;association_foreignkey:ID;constraint:OnDelete:CASCADE"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ModelPost) TableName() string {
|
||||||
|
return "posts"
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelComment represents a comment on a post
|
||||||
|
type ModelComment struct {
|
||||||
|
ID int64 `gorm:"column:id;primaryKey;autoIncrement;type:bigint"`
|
||||||
|
PostID int64 `gorm:"column:post_id;type:bigint;not null;index:idx_post_id"`
|
||||||
|
UserID *int64 `gorm:"column:user_id;type:bigint;index:idx_user_id"`
|
||||||
|
Content string `gorm:"column:content;type:text;not null"`
|
||||||
|
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;default:now()"`
|
||||||
|
UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;default:now()"`
|
||||||
|
|
||||||
|
Post *ModelPost `gorm:"foreignKey:PostID;references:ID;constraint:OnDelete:CASCADE"`
|
||||||
|
User *ModelUser `gorm:"foreignKey:UserID;references:ID;constraint:OnDelete:SET NULL"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ModelComment) TableName() string {
|
||||||
|
return "comments"
|
||||||
|
}
|
||||||
18 tests/assets/gorm/simple.go Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type User struct {
|
||||||
|
ID int64 `gorm:"column:id;primaryKey;autoIncrement;type:bigint"`
|
||||||
|
Email string `gorm:"column:email;type:varchar(255);not null"`
|
||||||
|
Name string `gorm:"column:name;type:text"`
|
||||||
|
Age *int `gorm:"column:age;type:integer"`
|
||||||
|
IsActive bool `gorm:"column:is_active;type:boolean"`
|
||||||
|
CreatedAt time.Time `gorm:"column:created_at;type:timestamp;default:now()"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (User) TableName() string {
|
||||||
|
return "users"
|
||||||
|
}
|
||||||
46 tests/assets/graphql/complex.graphql Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
# Complex GraphQL schema with multiple features
|
||||||
|
|
||||||
|
scalar DateTime
|
||||||
|
scalar JSON
|
||||||
|
scalar Date
|
||||||
|
|
||||||
|
enum Role {
|
||||||
|
USER
|
||||||
|
ADMIN
|
||||||
|
MODERATOR
|
||||||
|
}
|
||||||
|
|
||||||
|
type User {
|
||||||
|
id: ID!
|
||||||
|
email: String!
|
||||||
|
name: String!
|
||||||
|
role: Role!
|
||||||
|
createdAt: DateTime!
|
||||||
|
posts: [Post!]!
|
||||||
|
profile: Profile
|
||||||
|
}
|
||||||
|
|
||||||
|
type Profile {
|
||||||
|
id: ID!
|
||||||
|
bio: String
|
||||||
|
avatar: String
|
||||||
|
metadata: JSON
|
||||||
|
user: User!
|
||||||
|
}
|
||||||
|
|
||||||
|
type Post {
|
||||||
|
id: ID!
|
||||||
|
title: String!
|
||||||
|
slug: String!
|
||||||
|
content: String
|
||||||
|
published: Boolean!
|
||||||
|
publishedAt: Date
|
||||||
|
author: User!
|
||||||
|
tags: [Tag!]!
|
||||||
|
}
|
||||||
|
|
||||||
|
type Tag {
|
||||||
|
id: ID!
|
||||||
|
name: String!
|
||||||
|
posts: [Post!]!
|
||||||
|
}
|
||||||
13
tests/assets/graphql/custom_scalars.graphql
Normal file
13
tests/assets/graphql/custom_scalars.graphql
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
# GraphQL schema with custom scalars
|
||||||
|
|
||||||
|
scalar DateTime
|
||||||
|
scalar JSON
|
||||||
|
scalar Date
|
||||||
|
|
||||||
|
type User {
|
||||||
|
id: ID!
|
||||||
|
email: String!
|
||||||
|
createdAt: DateTime!
|
||||||
|
metadata: JSON
|
||||||
|
birthDate: Date
|
||||||
|
}
|
||||||
13
tests/assets/graphql/enums.graphql
Normal file
13
tests/assets/graphql/enums.graphql
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
# GraphQL schema with enums
|
||||||
|
|
||||||
|
enum Role {
|
||||||
|
ADMIN
|
||||||
|
USER
|
||||||
|
GUEST
|
||||||
|
}
|
||||||
|
|
||||||
|
type User {
|
||||||
|
id: ID!
|
||||||
|
email: String!
|
||||||
|
role: Role!
|
||||||
|
}
|
||||||
16
tests/assets/graphql/relations.graphql
Normal file
16
tests/assets/graphql/relations.graphql
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
# GraphQL schema with relationships
|
||||||
|
|
||||||
|
type User {
|
||||||
|
id: ID!
|
||||||
|
email: String!
|
||||||
|
name: String!
|
||||||
|
posts: [Post!]!
|
||||||
|
}
|
||||||
|
|
||||||
|
type Post {
|
||||||
|
id: ID!
|
||||||
|
title: String!
|
||||||
|
content: String
|
||||||
|
published: Boolean!
|
||||||
|
author: User!
|
||||||
|
}
|
||||||
9
tests/assets/graphql/simple.graphql
Normal file
9
tests/assets/graphql/simple.graphql
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
# Simple GraphQL schema for testing basic type parsing
|
||||||
|
|
||||||
|
type User {
|
||||||
|
id: ID!
|
||||||
|
email: String!
|
||||||
|
name: String
|
||||||
|
age: Int
|
||||||
|
active: Boolean!
|
||||||
|
}
|
||||||
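A fixture like this is normally exercised from a unit test that loads the file and checks the declared types. The sketch below is a hypothetical, stdlib-only example; the path and the assertions are assumptions for illustration, not the project's real tests.

```go
package assets_test

import (
    "os"
    "strings"
    "testing"
)

func TestSimpleSchemaDeclaresUser(t *testing.T) {
    // Path is relative to this hypothetical test file.
    data, err := os.ReadFile("tests/assets/graphql/simple.graphql")
    if err != nil {
        t.Fatalf("reading fixture: %v", err)
    }
    schema := string(data)
    // Rough sanity check: the fixture declares a User type
    // with a non-nullable id field.
    if !strings.Contains(schema, "type User {") || !strings.Contains(schema, "id: ID!") {
        t.Fatalf("fixture does not declare the expected User type")
    }
}
```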
46
tests/assets/prisma/example.prisma
Normal file
@@ -0,0 +1,46 @@
datasource db {
  provider = "postgresql"
}

generator client {
  provider = "prisma-client"
  output   = "./generated"
}

model User {
  id      Int      @id @default(autoincrement())
  email   String   @unique
  name    String?
  role    Role     @default(USER)
  posts   Post[]
  profile Profile?
}

model Profile {
  id     Int    @id @default(autoincrement())
  bio    String
  user   User   @relation(fields: [userId], references: [id])
  userId Int    @unique
}

model Post {
  id         Int        @id @default(autoincrement())
  createdAt  DateTime   @default(now())
  updatedAt  DateTime   @updatedAt
  title      String
  published  Boolean    @default(false)
  author     User       @relation(fields: [authorId], references: [id])
  authorId   Int
  categories Category[]
}

model Category {
  id    Int    @id @default(autoincrement())
  name  String
  posts Post[]
}

enum Role {
  USER
  ADMIN
}
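To show the kind of structure a schema reader has to pull out of a file like this, here is a minimal, hypothetical Go sketch that lists the `model` blocks with a regular expression. It is illustrative only and says nothing about how the project actually handles Prisma schemas.

```go
package main

import (
    "fmt"
    "regexp"
)

const schema = `
model User {
  id    Int    @id @default(autoincrement())
  email String @unique
}

model Post {
  id    Int    @id @default(autoincrement())
  title String
}
`

func main() {
    // Capture the identifier that follows each "model" keyword.
    re := regexp.MustCompile(`(?m)^model\s+(\w+)\s*\{`)
    for _, m := range re.FindAllStringSubmatch(schema, -1) {
        fmt.Println(m[1]) // User, Post
    }
}
```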
115
tests/assets/typeorm/example.ts
Normal file
@@ -0,0 +1,115 @@
//@ts-nocheck
import { Entity, PrimaryGeneratedColumn, Column, ManyToOne, OneToMany, ManyToMany, JoinTable, CreateDateColumn, UpdateDateColumn } from 'typeorm';

@Entity()
export class User {
  @PrimaryGeneratedColumn('uuid')
  id: string;

  @Column({ unique: true })
  email: string;

  @Column()
  name: string;

  @CreateDateColumn()
  createdAt: Date;

  @UpdateDateColumn()
  updatedAt: Date;

  @OneToMany(() => Project, project => project.owner)
  ownedProjects: Project[];

  @ManyToMany(() => Project, project => project.members)
  @JoinTable()
  projects: Project[];
}

@Entity()
export class Project {
  @PrimaryGeneratedColumn('uuid')
  id: string;

  @Column()
  title: string;

  @Column({ nullable: true })
  description: string;

  @Column({ default: 'active' })
  status: string;

  @ManyToOne(() => User, user => user.ownedProjects)
  owner: User;

  @ManyToMany(() => User, user => user.projects)
  members: User[];

  @OneToMany(() => Task, task => task.project)
  tasks: Task[];

  @CreateDateColumn()
  createdAt: Date;
}

@Entity()
export class Task {
  @PrimaryGeneratedColumn('uuid')
  id: string;

  @Column()
  title: string;

  @Column({ type: 'text', nullable: true })
  description: string;

  @Column({ default: 'todo' })
  status: string;

  @Column({ nullable: true })
  dueDate: Date;

  @ManyToOne(() => Project, project => project.tasks)
  project: Project;

  @ManyToOne(() => User, { nullable: true })
  assignee: User;

  @OneToMany(() => Comment, comment => comment.task)
  comments: Comment[];
}

@Entity()
export class Comment {
  @PrimaryGeneratedColumn('uuid')
  id: string;

  @Column('text')
  content: string;

  @ManyToOne(() => Task, task => task.comments)
  task: Task;

  @ManyToOne(() => User)
  author: User;

  @CreateDateColumn()
  createdAt: Date;
}

@Entity()
export class Tag {
  @PrimaryGeneratedColumn('uuid')
  id: string;

  @Column({ unique: true })
  name: string;

  @Column()
  color: string;

  @ManyToMany(() => Task)
  @JoinTable()
  tasks: Task[];
}
21
vendor/github.com/jinzhu/inflection/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2015 - Jinzhu

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
55
vendor/github.com/jinzhu/inflection/README.md
generated
vendored
Normal file
@@ -0,0 +1,55 @@
# Inflection

Inflection pluralizes and singularizes English nouns

[](https://app.wercker.com/project/byKey/f8c7432b097d1f4ce636879670be0930)

## Basic Usage

```go
inflection.Plural("person") => "people"
inflection.Plural("Person") => "People"
inflection.Plural("PERSON") => "PEOPLE"
inflection.Plural("bus") => "buses"
inflection.Plural("BUS") => "BUSES"
inflection.Plural("Bus") => "Buses"

inflection.Singular("people") => "person"
inflection.Singular("People") => "Person"
inflection.Singular("PEOPLE") => "PERSON"
inflection.Singular("buses") => "bus"
inflection.Singular("BUSES") => "BUS"
inflection.Singular("Buses") => "Bus"

inflection.Plural("FancyPerson") => "FancyPeople"
inflection.Singular("FancyPeople") => "FancyPerson"
```

## Register Rules

Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)

If you want to register more rules, follow:

```
inflection.AddUncountable("fish")
inflection.AddIrregular("person", "people")
inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
```

## Contributing

You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do.

## Author

**jinzhu**

* <http://github.com/jinzhu>
* <wosmvp@gmail.com>
* <http://twitter.com/zhangjinzhu>

## License

Released under the [MIT License](http://www.opensource.org/licenses/MIT).
273
vendor/github.com/jinzhu/inflection/inflections.go
generated
vendored
Normal file
@@ -0,0 +1,273 @@
/*
Package inflection pluralizes and singularizes English nouns.

    inflection.Plural("person") => "people"
    inflection.Plural("Person") => "People"
    inflection.Plural("PERSON") => "PEOPLE"

    inflection.Singular("people") => "person"
    inflection.Singular("People") => "Person"
    inflection.Singular("PEOPLE") => "PERSON"

    inflection.Plural("FancyPerson") => "FancydPeople"
    inflection.Singular("FancyPeople") => "FancydPerson"

Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)

If you want to register more rules, follow:

    inflection.AddUncountable("fish")
    inflection.AddIrregular("person", "people")
    inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
    inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
*/
package inflection

import (
    "regexp"
    "strings"
)

type inflection struct {
    regexp  *regexp.Regexp
    replace string
}

// Regular is a regexp find replace inflection
type Regular struct {
    find    string
    replace string
}

// Irregular is a hard replace inflection,
// containing both singular and plural forms
type Irregular struct {
    singular string
    plural   string
}

// RegularSlice is a slice of Regular inflections
type RegularSlice []Regular

// IrregularSlice is a slice of Irregular inflections
type IrregularSlice []Irregular

var pluralInflections = RegularSlice{
    {"([a-z])$", "${1}s"},
    {"s$", "s"},
    {"^(ax|test)is$", "${1}es"},
    {"(octop|vir)us$", "${1}i"},
    {"(octop|vir)i$", "${1}i"},
    {"(alias|status)$", "${1}es"},
    {"(bu)s$", "${1}ses"},
    {"(buffal|tomat)o$", "${1}oes"},
    {"([ti])um$", "${1}a"},
    {"([ti])a$", "${1}a"},
    {"sis$", "ses"},
    {"(?:([^f])fe|([lr])f)$", "${1}${2}ves"},
    {"(hive)$", "${1}s"},
    {"([^aeiouy]|qu)y$", "${1}ies"},
    {"(x|ch|ss|sh)$", "${1}es"},
    {"(matr|vert|ind)(?:ix|ex)$", "${1}ices"},
    {"^(m|l)ouse$", "${1}ice"},
    {"^(m|l)ice$", "${1}ice"},
    {"^(ox)$", "${1}en"},
    {"^(oxen)$", "${1}"},
    {"(quiz)$", "${1}zes"},
}

var singularInflections = RegularSlice{
    {"s$", ""},
    {"(ss)$", "${1}"},
    {"(n)ews$", "${1}ews"},
    {"([ti])a$", "${1}um"},
    {"((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$", "${1}sis"},
    {"(^analy)(sis|ses)$", "${1}sis"},
    {"([^f])ves$", "${1}fe"},
    {"(hive)s$", "${1}"},
    {"(tive)s$", "${1}"},
    {"([lr])ves$", "${1}f"},
    {"([^aeiouy]|qu)ies$", "${1}y"},
    {"(s)eries$", "${1}eries"},
    {"(m)ovies$", "${1}ovie"},
    {"(c)ookies$", "${1}ookie"},
    {"(x|ch|ss|sh)es$", "${1}"},
    {"^(m|l)ice$", "${1}ouse"},
    {"(bus)(es)?$", "${1}"},
    {"(o)es$", "${1}"},
    {"(shoe)s$", "${1}"},
    {"(cris|test)(is|es)$", "${1}is"},
    {"^(a)x[ie]s$", "${1}xis"},
    {"(octop|vir)(us|i)$", "${1}us"},
    {"(alias|status)(es)?$", "${1}"},
    {"^(ox)en", "${1}"},
    {"(vert|ind)ices$", "${1}ex"},
    {"(matr)ices$", "${1}ix"},
    {"(quiz)zes$", "${1}"},
    {"(database)s$", "${1}"},
}

var irregularInflections = IrregularSlice{
    {"person", "people"},
    {"man", "men"},
    {"child", "children"},
    {"sex", "sexes"},
    {"move", "moves"},
    {"mombie", "mombies"},
}

var uncountableInflections = []string{"equipment", "information", "rice", "money", "species", "series", "fish", "sheep", "jeans", "police"}

var compiledPluralMaps []inflection
var compiledSingularMaps []inflection

func compile() {
    compiledPluralMaps = []inflection{}
    compiledSingularMaps = []inflection{}
    for _, uncountable := range uncountableInflections {
        inf := inflection{
            regexp:  regexp.MustCompile("^(?i)(" + uncountable + ")$"),
            replace: "${1}",
        }
        compiledPluralMaps = append(compiledPluralMaps, inf)
        compiledSingularMaps = append(compiledSingularMaps, inf)
    }

    for _, value := range irregularInflections {
        infs := []inflection{
            inflection{regexp: regexp.MustCompile(strings.ToUpper(value.singular) + "$"), replace: strings.ToUpper(value.plural)},
            inflection{regexp: regexp.MustCompile(strings.Title(value.singular) + "$"), replace: strings.Title(value.plural)},
            inflection{regexp: regexp.MustCompile(value.singular + "$"), replace: value.plural},
        }
        compiledPluralMaps = append(compiledPluralMaps, infs...)
    }

    for _, value := range irregularInflections {
        infs := []inflection{
            inflection{regexp: regexp.MustCompile(strings.ToUpper(value.plural) + "$"), replace: strings.ToUpper(value.singular)},
            inflection{regexp: regexp.MustCompile(strings.Title(value.plural) + "$"), replace: strings.Title(value.singular)},
            inflection{regexp: regexp.MustCompile(value.plural + "$"), replace: value.singular},
        }
        compiledSingularMaps = append(compiledSingularMaps, infs...)
    }

    for i := len(pluralInflections) - 1; i >= 0; i-- {
        value := pluralInflections[i]
        infs := []inflection{
            inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)},
            inflection{regexp: regexp.MustCompile(value.find), replace: value.replace},
            inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace},
        }
        compiledPluralMaps = append(compiledPluralMaps, infs...)
    }

    for i := len(singularInflections) - 1; i >= 0; i-- {
        value := singularInflections[i]
        infs := []inflection{
            inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)},
            inflection{regexp: regexp.MustCompile(value.find), replace: value.replace},
            inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace},
        }
        compiledSingularMaps = append(compiledSingularMaps, infs...)
    }
}

func init() {
    compile()
}

// AddPlural adds a plural inflection
func AddPlural(find, replace string) {
    pluralInflections = append(pluralInflections, Regular{find, replace})
    compile()
}

// AddSingular adds a singular inflection
func AddSingular(find, replace string) {
    singularInflections = append(singularInflections, Regular{find, replace})
    compile()
}

// AddIrregular adds an irregular inflection
func AddIrregular(singular, plural string) {
    irregularInflections = append(irregularInflections, Irregular{singular, plural})
    compile()
}

// AddUncountable adds an uncountable inflection
func AddUncountable(values ...string) {
    uncountableInflections = append(uncountableInflections, values...)
    compile()
}

// GetPlural retrieves the plural inflection values
func GetPlural() RegularSlice {
    plurals := make(RegularSlice, len(pluralInflections))
    copy(plurals, pluralInflections)
    return plurals
}

// GetSingular retrieves the singular inflection values
func GetSingular() RegularSlice {
    singulars := make(RegularSlice, len(singularInflections))
    copy(singulars, singularInflections)
    return singulars
}

// GetIrregular retrieves the irregular inflection values
func GetIrregular() IrregularSlice {
    irregular := make(IrregularSlice, len(irregularInflections))
    copy(irregular, irregularInflections)
    return irregular
}

// GetUncountable retrieves the uncountable inflection values
func GetUncountable() []string {
    uncountables := make([]string, len(uncountableInflections))
    copy(uncountables, uncountableInflections)
    return uncountables
}

// SetPlural sets the plural inflections slice
func SetPlural(inflections RegularSlice) {
    pluralInflections = inflections
    compile()
}

// SetSingular sets the singular inflections slice
func SetSingular(inflections RegularSlice) {
    singularInflections = inflections
    compile()
}

// SetIrregular sets the irregular inflections slice
func SetIrregular(inflections IrregularSlice) {
    irregularInflections = inflections
    compile()
}

// SetUncountable sets the uncountable inflections slice
func SetUncountable(inflections []string) {
    uncountableInflections = inflections
    compile()
}

// Plural converts a word to its plural form
func Plural(str string) string {
    for _, inflection := range compiledPluralMaps {
        if inflection.regexp.MatchString(str) {
            return inflection.regexp.ReplaceAllString(str, inflection.replace)
        }
    }
    return str
}

// Singular converts a word to its singular form
func Singular(str string) string {
    for _, inflection := range compiledSingularMaps {
        if inflection.regexp.MatchString(str) {
            return inflection.regexp.ReplaceAllString(str, inflection.replace)
        }
    }
    return str
}
23
vendor/github.com/jinzhu/inflection/wercker.yml
generated
vendored
Normal file
@@ -0,0 +1,23 @@
box: golang

build:
  steps:
    - setup-go-workspace

    # Gets the dependencies
    - script:
        name: go get
        code: |
          go get

    # Build the project
    - script:
        name: go build
        code: |
          go build ./...

    # Test the project
    - script:
        name: go test
        code: |
          go test ./...
15
vendor/github.com/puzpuzpuz/xsync/v3/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,15 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Dependency directories (remove the comment below to include it)
# vendor/
133
vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md
generated
vendored
Normal file
@@ -0,0 +1,133 @@
# xsync benchmarks

If you're interested in `MapOf` comparison with some of the popular concurrent hash maps written in Go, check [this](https://github.com/cornelk/hashmap/pull/70) and [this](https://github.com/alphadose/haxmap/pull/22) PRs.

The below results were obtained for xsync v2.3.1 on a c6g.metal EC2 instance (64 CPU, 128GB RAM) running Linux and Go 1.19.3. I'd like to thank [@felixge](https://github.com/felixge) who kindly ran the benchmarks.

The following commands were used to run the benchmarks:
```bash
$ go test -run='^$' -cpu=1,2,4,8,16,32,64 -bench . -count=30 -timeout=0 | tee bench.txt
$ benchstat bench.txt | tee benchstat.txt
```

The below sections contain some of the results. Refer to [this gist](https://gist.github.com/puzpuzpuz/e62e38e06feadecfdc823c0f941ece0b) for the complete output.

Please note that `MapOf` got a number of optimizations since v2.3.1, so the current result is likely to be different.

### Counter vs. atomic int64

```
name  time/op
Counter  27.3ns ± 1%
Counter-2  27.2ns ±11%
Counter-4  15.3ns ± 8%
Counter-8  7.43ns ± 7%
Counter-16  3.70ns ±10%
Counter-32  1.77ns ± 3%
Counter-64  0.96ns ±10%
AtomicInt64  7.60ns ± 0%
AtomicInt64-2  12.6ns ±13%
AtomicInt64-4  13.5ns ±14%
AtomicInt64-8  12.7ns ± 9%
AtomicInt64-16  12.8ns ± 8%
AtomicInt64-32  13.0ns ± 6%
AtomicInt64-64  12.9ns ± 7%
```

Here `time/op` stands for average time spent on operation. If you divide `10^9` by the result in nanoseconds per operation, you'd get the throughput in operations per second. Thus, the ideal theoretical scalability of a concurrent data structure implies that the reported `time/op` decreases proportionally with the increased number of CPU cores. On the contrary, if the measured time per operation increases when run on more cores, it means performance degradation.

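As a quick worked example of that conversion, using the single-core `Counter` row from the table above, the snippet below turns a time/op figure into approximate throughput:

```go
package main

import "fmt"

func main() {
	// 27.3 ns/op on a single core (Counter row above)
	// => 1e9 / 27.3 ≈ 36.6 million operations per second.
	nsPerOp := 27.3
	opsPerSec := 1e9 / nsPerOp
	fmt.Printf("%.1f million ops/s\n", opsPerSec/1e6)
}
```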
### MapOf vs. sync.Map

1,000 `[int, int]` entries with a warm-up, 100% Loads:
```
IntegerMapOf_WarmUp/reads=100%  24.0ns ± 0%
IntegerMapOf_WarmUp/reads=100%-2  12.0ns ± 0%
IntegerMapOf_WarmUp/reads=100%-4  6.02ns ± 0%
IntegerMapOf_WarmUp/reads=100%-8  3.01ns ± 0%
IntegerMapOf_WarmUp/reads=100%-16  1.50ns ± 0%
IntegerMapOf_WarmUp/reads=100%-32  0.75ns ± 0%
IntegerMapOf_WarmUp/reads=100%-64  0.38ns ± 0%
IntegerMapStandard_WarmUp/reads=100%  55.3ns ± 0%
IntegerMapStandard_WarmUp/reads=100%-2  27.6ns ± 0%
IntegerMapStandard_WarmUp/reads=100%-4  16.1ns ± 3%
IntegerMapStandard_WarmUp/reads=100%-8  8.35ns ± 7%
IntegerMapStandard_WarmUp/reads=100%-16  4.24ns ± 7%
IntegerMapStandard_WarmUp/reads=100%-32  2.18ns ± 6%
IntegerMapStandard_WarmUp/reads=100%-64  1.11ns ± 3%
```

1,000 `[int, int]` entries with a warm-up, 99% Loads, 0.5% Stores, 0.5% Deletes:
```
IntegerMapOf_WarmUp/reads=99%  31.0ns ± 0%
IntegerMapOf_WarmUp/reads=99%-2  16.4ns ± 1%
IntegerMapOf_WarmUp/reads=99%-4  8.42ns ± 0%
IntegerMapOf_WarmUp/reads=99%-8  4.41ns ± 0%
IntegerMapOf_WarmUp/reads=99%-16  2.38ns ± 2%
IntegerMapOf_WarmUp/reads=99%-32  1.37ns ± 4%
IntegerMapOf_WarmUp/reads=99%-64  0.85ns ± 2%
IntegerMapStandard_WarmUp/reads=99%  121ns ± 1%
IntegerMapStandard_WarmUp/reads=99%-2  109ns ± 3%
IntegerMapStandard_WarmUp/reads=99%-4  115ns ± 4%
IntegerMapStandard_WarmUp/reads=99%-8  114ns ± 2%
IntegerMapStandard_WarmUp/reads=99%-16  105ns ± 2%
IntegerMapStandard_WarmUp/reads=99%-32  97.0ns ± 3%
IntegerMapStandard_WarmUp/reads=99%-64  98.0ns ± 2%
```

1,000 `[int, int]` entries with a warm-up, 75% Loads, 12.5% Stores, 12.5% Deletes:
```
IntegerMapOf_WarmUp/reads=75%-reads  46.2ns ± 1%
IntegerMapOf_WarmUp/reads=75%-reads-2  36.7ns ± 2%
IntegerMapOf_WarmUp/reads=75%-reads-4  22.0ns ± 1%
IntegerMapOf_WarmUp/reads=75%-reads-8  12.8ns ± 2%
IntegerMapOf_WarmUp/reads=75%-reads-16  7.69ns ± 1%
IntegerMapOf_WarmUp/reads=75%-reads-32  5.16ns ± 1%
IntegerMapOf_WarmUp/reads=75%-reads-64  4.91ns ± 1%
IntegerMapStandard_WarmUp/reads=75%-reads  156ns ± 0%
IntegerMapStandard_WarmUp/reads=75%-reads-2  177ns ± 1%
IntegerMapStandard_WarmUp/reads=75%-reads-4  197ns ± 1%
IntegerMapStandard_WarmUp/reads=75%-reads-8  221ns ± 2%
IntegerMapStandard_WarmUp/reads=75%-reads-16  242ns ± 1%
IntegerMapStandard_WarmUp/reads=75%-reads-32  258ns ± 1%
IntegerMapStandard_WarmUp/reads=75%-reads-64  264ns ± 1%
```

### MPMCQueue vs. Go channels

Concurrent producers and consumers (1:1), queue/channel size 1,000, some work done by both producers and consumers:
```
QueueProdConsWork100  252ns ± 0%
QueueProdConsWork100-2  206ns ± 5%
QueueProdConsWork100-4  136ns ±12%
QueueProdConsWork100-8  110ns ± 6%
QueueProdConsWork100-16  108ns ± 2%
QueueProdConsWork100-32  102ns ± 2%
QueueProdConsWork100-64  101ns ± 0%
ChanProdConsWork100  283ns ± 0%
ChanProdConsWork100-2  406ns ±21%
ChanProdConsWork100-4  549ns ± 7%
ChanProdConsWork100-8  754ns ± 7%
ChanProdConsWork100-16  828ns ± 7%
ChanProdConsWork100-32  810ns ± 8%
ChanProdConsWork100-64  832ns ± 4%
```

### RBMutex vs. sync.RWMutex

The writer locks on each 100,000 iteration with some work in the critical section for both readers and the writer:
```
RBMutexWorkWrite100000  146ns ± 0%
RBMutexWorkWrite100000-2  73.3ns ± 0%
RBMutexWorkWrite100000-4  36.7ns ± 0%
RBMutexWorkWrite100000-8  18.6ns ± 0%
RBMutexWorkWrite100000-16  9.83ns ± 3%
RBMutexWorkWrite100000-32  5.53ns ± 0%
RBMutexWorkWrite100000-64  4.04ns ± 3%
RWMutexWorkWrite100000  121ns ± 0%
RWMutexWorkWrite100000-2  128ns ± 1%
RWMutexWorkWrite100000-4  124ns ± 2%
RWMutexWorkWrite100000-8  101ns ± 1%
RWMutexWorkWrite100000-16  92.9ns ± 1%
RWMutexWorkWrite100000-32  89.9ns ± 1%
RWMutexWorkWrite100000-64  88.4ns ± 1%
```
201
vendor/github.com/puzpuzpuz/xsync/v3/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
195
vendor/github.com/puzpuzpuz/xsync/v3/README.md
generated
vendored
Normal file
@@ -0,0 +1,195 @@
[](https://pkg.go.dev/github.com/puzpuzpuz/xsync/v3)
[](https://goreportcard.com/report/github.com/puzpuzpuz/xsync/v3)
[](https://codecov.io/gh/puzpuzpuz/xsync)

# xsync

Concurrent data structures for Go. Aims to provide more scalable alternatives for some of the data structures from the standard `sync` package, but not only.

Covered with tests following the approach described [here](https://puzpuzpuz.dev/testing-concurrent-code-for-fun-and-profit).

## Benchmarks

Benchmark results may be found [here](BENCHMARKS.md). I'd like to thank [@felixge](https://github.com/felixge) who kindly ran the benchmarks on a beefy multicore machine.

Also, a non-scientific, unfair benchmark comparing Java's [j.u.c.ConcurrentHashMap](https://docs.oracle.com/en/java/javase/17/docs/api/java.base/java/util/concurrent/ConcurrentHashMap.html) and `xsync.MapOf` is available [here](https://puzpuzpuz.dev/concurrent-map-in-go-vs-java-yet-another-meaningless-benchmark).

## Usage

The latest xsync major version is v3, so `/v3` suffix should be used when importing the library:

```go
import (
	"github.com/puzpuzpuz/xsync/v3"
)
```

*Note for pre-v3 users*: v1 and v2 support is discontinued, so please upgrade to v3. While the API has some breaking changes, the migration should be trivial.

### Counter

A `Counter` is a striped `int64` counter inspired by the `j.u.c.a.LongAdder` class from the Java standard library.

```go
c := xsync.NewCounter()
// increment and decrement the counter
c.Inc()
c.Dec()
// read the current value
v := c.Value()
```

Works better in comparison with a single atomically updated `int64` counter in high contention scenarios.

### Map

A `Map` is like a concurrent hash table-based map. It follows the interface of `sync.Map` with a number of valuable extensions like `Compute` or `Size`.

```go
m := xsync.NewMap()
m.Store("foo", "bar")
v, ok := m.Load("foo")
s := m.Size()
```

`Map` uses a modified version of Cache-Line Hash Table (CLHT) data structure: https://github.com/LPD-EPFL/CLHT

CLHT is built around the idea of organizing the hash table in cache-line-sized buckets, so that on all modern CPUs update operations complete with minimal cache-line transfer. Also, `Get` operations are obstruction-free and involve no writes to shared memory, hence no mutexes or any other sort of locks. Due to this design, in all considered scenarios `Map` outperforms `sync.Map`.

One important difference with `sync.Map` is that only string keys are supported. That's because Golang standard library does not expose the built-in hash functions for `interface{}` values.

`MapOf[K, V]` is an implementation with parametrized key and value types. While it's still a CLHT-inspired hash map, `MapOf`'s design is quite different from `Map`. As a result, less GC pressure and fewer atomic operations on reads.

```go
m := xsync.NewMapOf[string, string]()
m.Store("foo", "bar")
v, ok := m.Load("foo")
```

Apart from CLHT, `MapOf` borrows ideas from Java's `j.u.c.ConcurrentHashMap` (immutable K/V pair structs instead of atomic snapshots) and C++'s `absl::flat_hash_map` (meta memory and SWAR-based lookups). It also has more dense memory layout when compared with `Map`. Long story short, `MapOf` should be preferred over `Map` when possible.

An important difference with `Map` is that `MapOf` supports arbitrary `comparable` key types:

```go
type Point struct {
	x int32
	y int32
}
m := NewMapOf[Point, int]()
m.Store(Point{42, 42}, 42)
v, ok := m.Load(point{42, 42})
```

Apart from `Range` method available for map iteration, there are also `ToPlainMap`/`ToPlainMapOf` utility functions to convert a `Map`/`MapOf` to a built-in Go's `map`:
```go
m := xsync.NewMapOf[int, int]()
m.Store(42, 42)
pm := xsync.ToPlainMapOf(m)
```

Both `Map` and `MapOf` use the built-in Golang's hash function which has DDOS protection. This means that each map instance gets its own seed number and the hash function uses that seed for hash code calculation. However, for smaller keys this hash function has some overhead. So, if you don't need DDOS protection, you may provide a custom hash function when creating a `MapOf`. For instance, Murmur3 finalizer does a decent job when it comes to integers:

```go
m := NewMapOfWithHasher[int, int](func(i int, _ uint64) uint64 {
	h := uint64(i)
	h = (h ^ (h >> 33)) * 0xff51afd7ed558ccd
	h = (h ^ (h >> 33)) * 0xc4ceb9fe1a85ec53
	return h ^ (h >> 33)
})
```

When benchmarking concurrent maps, make sure to configure all of the competitors with the same hash function or, at least, take hash function performance into the consideration.

### SPSCQueue

A `SPSCQueue` is a bounded single-producer single-consumer concurrent queue. This means that not more than a single goroutine must be publishing items to the queue while not more than a single goroutine must be consuming those items.

```go
q := xsync.NewSPSCQueue(1024)
// producer inserts an item into the queue
// optimistic insertion attempt; doesn't block
inserted := q.TryEnqueue("bar")
// consumer obtains an item from the queue
// optimistic obtain attempt; doesn't block
item, ok := q.TryDequeue() // interface{} pointing to a string
```

`SPSCQueueOf[I]` is an implementation with parametrized item type. It is available for Go 1.19 or later.

```go
q := xsync.NewSPSCQueueOf[string](1024)
inserted := q.TryEnqueue("foo")
item, ok := q.TryDequeue() // string
```

The queue is based on the data structure from this [article](https://rigtorp.se/ringbuffer). The idea is to reduce the CPU cache coherency traffic by keeping cached copies of read and write indexes used by producer and consumer respectively.

### MPMCQueue

A `MPMCQueue` is a bounded multi-producer multi-consumer concurrent queue.

```go
q := xsync.NewMPMCQueue(1024)
// producer optimistically inserts an item into the queue
// optimistic insertion attempt; doesn't block
inserted := q.TryEnqueue("bar")
// consumer obtains an item from the queue
// optimistic obtain attempt; doesn't block
item, ok := q.TryDequeue() // interface{} pointing to a string
```

`MPMCQueueOf[I]` is an implementation with parametrized item type. It is available for Go 1.19 or later.

```go
q := xsync.NewMPMCQueueOf[string](1024)
inserted := q.TryEnqueue("foo")
item, ok := q.TryDequeue() // string
```

The queue is based on the algorithm from the [MPMCQueue](https://github.com/rigtorp/MPMCQueue) C++ library which in its turn references D.Vyukov's [MPMC queue](https://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue). According to the following [classification](https://www.1024cores.net/home/lock-free-algorithms/queues), the queue is array-based, fails on overflow, provides causal FIFO, has blocking producers and consumers.

The idea of the algorithm is to allow parallelism for concurrent producers and consumers by introducing the notion of tickets, i.e. values of two counters, one per producers/consumers. An atomic increment of one of those counters is the only noticeable contention point in queue operations. The rest of the operation avoids contention on writes thanks to the turn-based read/write access for each of the queue items.

In essence, `MPMCQueue` is a specialized queue for scenarios where there are multiple concurrent producers and consumers of a single queue running on a large multicore machine.

To get the optimal performance, you may want to set the queue size to be large enough, say, an order of magnitude greater than the number of producers/consumers, to allow producers and consumers to progress with their queue operations in parallel most of the time.

### RBMutex

A `RBMutex` is a reader-biased reader/writer mutual exclusion lock. The lock can be held by many readers or a single writer.

```go
mu := xsync.NewRBMutex()
// reader lock calls return a token
t := mu.RLock()
// the token must be later used to unlock the mutex
mu.RUnlock(t)
// writer locks are the same as in sync.RWMutex
mu.Lock()
mu.Unlock()
```

`RBMutex` is based on a modified version of BRAVO (Biased Locking for Reader-Writer Locks) algorithm: https://arxiv.org/pdf/1810.01553.pdf

The idea of the algorithm is to build on top of an existing reader-writer mutex and introduce a fast path for readers. On the fast path, reader lock attempts are sharded over an internal array based on the reader identity (a token in the case of Golang). This means that readers do not contend over a single atomic counter like it's done in, say, `sync.RWMutex` allowing for better scalability in terms of cores.

Hence, by the design `RBMutex` is a specialized mutex for scenarios, such as caches, where the vast majority of locks are acquired by readers and write lock acquire attempts are infrequent. In such scenarios, `RBMutex` should perform better than the `sync.RWMutex` on large multicore machines.

`RBMutex` extends `sync.RWMutex` internally and uses it as the "reader bias disabled" fallback, so the same semantics apply. The only noticeable difference is in the reader tokens returned from the `RLock`/`RUnlock` methods.

Apart from blocking methods, `RBMutex` also has methods for optimistic locking:
```go
mu := xsync.NewRBMutex()
if locked, t := mu.TryRLock(); locked {
	// critical reader section...
	mu.RUnlock(t)
}
if mu.TryLock() {
	// critical writer section...
	mu.Unlock()
}
```

## License

Licensed under MIT.
99 vendor/github.com/puzpuzpuz/xsync/v3/counter.go (generated, vendored, new file)
@@ -0,0 +1,99 @@
```go
package xsync

import (
	"sync"
	"sync/atomic"
)

// pool for P tokens
var ptokenPool sync.Pool

// a P token is used to point at the current OS thread (P)
// on which the goroutine is run; exact identity of the thread,
// as well as P migration tolerance, is not important since
// it's used to as a best effort mechanism for assigning
// concurrent operations (goroutines) to different stripes of
// the counter
type ptoken struct {
	idx uint32
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 4]byte
}

// A Counter is a striped int64 counter.
//
// Should be preferred over a single atomically updated int64
// counter in high contention scenarios.
//
// A Counter must not be copied after first use.
type Counter struct {
	stripes []cstripe
	mask    uint32
}

type cstripe struct {
	c int64
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 8]byte
}

// NewCounter creates a new Counter instance.
func NewCounter() *Counter {
	nstripes := nextPowOf2(parallelism())
	c := Counter{
		stripes: make([]cstripe, nstripes),
		mask:    nstripes - 1,
	}
	return &c
}

// Inc increments the counter by 1.
func (c *Counter) Inc() {
	c.Add(1)
}

// Dec decrements the counter by 1.
func (c *Counter) Dec() {
	c.Add(-1)
}

// Add adds the delta to the counter.
func (c *Counter) Add(delta int64) {
	t, ok := ptokenPool.Get().(*ptoken)
	if !ok {
		t = new(ptoken)
		t.idx = runtime_fastrand()
	}
	for {
		stripe := &c.stripes[t.idx&c.mask]
		cnt := atomic.LoadInt64(&stripe.c)
		if atomic.CompareAndSwapInt64(&stripe.c, cnt, cnt+delta) {
			break
		}
		// Give a try with another randomly selected stripe.
		t.idx = runtime_fastrand()
	}
	ptokenPool.Put(t)
}

// Value returns the current counter value.
// The returned value may not include all of the latest operations in
// presence of concurrent modifications of the counter.
func (c *Counter) Value() int64 {
	v := int64(0)
	for i := 0; i < len(c.stripes); i++ {
		stripe := &c.stripes[i]
		v += atomic.LoadInt64(&stripe.c)
	}
	return v
}

// Reset resets the counter to zero.
// This method should only be used when it is known that there are
// no concurrent modifications of the counter.
func (c *Counter) Reset() {
	for i := 0; i < len(c.stripes); i++ {
		stripe := &c.stripes[i]
		atomic.StoreInt64(&stripe.c, 0)
	}
}
```
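To make the striped design above concrete, here is a minimal usage sketch (assuming the vendored package is imported as `xsync`): writes land on per-stripe counters, and `Value` sums the stripes, so the result may trail concurrent writers.

```go
c := xsync.NewCounter()
c.Inc()   // +1
c.Add(41) // +41
c.Dec()   // -1
fmt.Println(c.Value()) // 41, modulo concurrent updates
```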
917 vendor/github.com/puzpuzpuz/xsync/v3/map.go (generated, vendored, new file)
@@ -0,0 +1,917 @@
|
|||||||
|
package xsync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
type mapResizeHint int
|
||||||
|
|
||||||
|
const (
|
||||||
|
mapGrowHint mapResizeHint = 0
|
||||||
|
mapShrinkHint mapResizeHint = 1
|
||||||
|
mapClearHint mapResizeHint = 2
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// number of Map entries per bucket; 3 entries lead to size of 64B
|
||||||
|
// (one cache line) on 64-bit machines
|
||||||
|
entriesPerMapBucket = 3
|
||||||
|
// threshold fraction of table occupation to start a table shrinking
|
||||||
|
// when deleting the last entry in a bucket chain
|
||||||
|
mapShrinkFraction = 128
|
||||||
|
// map load factor to trigger a table resize during insertion;
|
||||||
|
// a map holds up to mapLoadFactor*entriesPerMapBucket*mapTableLen
|
||||||
|
// key-value pairs (this is a soft limit)
|
||||||
|
mapLoadFactor = 0.75
|
||||||
|
// minimal table size, i.e. number of buckets; thus, minimal map
|
||||||
|
// capacity can be calculated as entriesPerMapBucket*defaultMinMapTableLen
|
||||||
|
defaultMinMapTableLen = 32
|
||||||
|
// minimum counter stripes to use
|
||||||
|
minMapCounterLen = 8
|
||||||
|
// maximum counter stripes to use; stands for around 4KB of memory
|
||||||
|
maxMapCounterLen = 32
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
topHashMask = uint64((1<<20)-1) << 44
|
||||||
|
topHashEntryMasks = [3]uint64{
|
||||||
|
topHashMask,
|
||||||
|
topHashMask >> 20,
|
||||||
|
topHashMask >> 40,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Map is like a Go map[string]interface{} but is safe for concurrent
|
||||||
|
// use by multiple goroutines without additional locking or
|
||||||
|
// coordination. It follows the interface of sync.Map with
|
||||||
|
// a number of valuable extensions like Compute or Size.
|
||||||
|
//
|
||||||
|
// A Map must not be copied after first use.
|
||||||
|
//
|
||||||
|
// Map uses a modified version of Cache-Line Hash Table (CLHT)
|
||||||
|
// data structure: https://github.com/LPD-EPFL/CLHT
|
||||||
|
//
|
||||||
|
// CLHT is built around idea to organize the hash table in
|
||||||
|
// cache-line-sized buckets, so that on all modern CPUs update
|
||||||
|
// operations complete with at most one cache-line transfer.
|
||||||
|
// Also, Get operations involve no write to memory, as well as no
|
||||||
|
// mutexes or any other sort of locks. Due to this design, in all
|
||||||
|
// considered scenarios Map outperforms sync.Map.
|
||||||
|
//
|
||||||
|
// One important difference with sync.Map is that only string keys
|
||||||
|
// are supported. That's because Golang standard library does not
|
||||||
|
// expose the built-in hash functions for interface{} values.
|
||||||
|
type Map struct {
|
||||||
|
totalGrowths int64
|
||||||
|
totalShrinks int64
|
||||||
|
resizing int64 // resize in progress flag; updated atomically
|
||||||
|
resizeMu sync.Mutex // only used along with resizeCond
|
||||||
|
resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications)
|
||||||
|
table unsafe.Pointer // *mapTable
|
||||||
|
minTableLen int
|
||||||
|
growOnly bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type mapTable struct {
|
||||||
|
buckets []bucketPadded
|
||||||
|
// striped counter for number of table entries;
|
||||||
|
// used to determine if a table shrinking is needed
|
||||||
|
// occupies min(buckets_memory/1024, 64KB) of memory
|
||||||
|
size []counterStripe
|
||||||
|
seed uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type counterStripe struct {
|
||||||
|
c int64
|
||||||
|
//lint:ignore U1000 prevents false sharing
|
||||||
|
pad [cacheLineSize - 8]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type bucketPadded struct {
|
||||||
|
//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
|
||||||
|
pad [cacheLineSize - unsafe.Sizeof(bucket{})]byte
|
||||||
|
bucket
|
||||||
|
}
|
||||||
|
|
||||||
|
type bucket struct {
|
||||||
|
next unsafe.Pointer // *bucketPadded
|
||||||
|
keys [entriesPerMapBucket]unsafe.Pointer
|
||||||
|
values [entriesPerMapBucket]unsafe.Pointer
|
||||||
|
// topHashMutex is a 2-in-1 value.
|
||||||
|
//
|
||||||
|
// It contains packed top 20 bits (20 MSBs) of hash codes for keys
|
||||||
|
// stored in the bucket:
|
||||||
|
// | key 0's top hash | key 1's top hash | key 2's top hash | bitmap for keys | mutex |
|
||||||
|
// | 20 bits | 20 bits | 20 bits | 3 bits | 1 bit |
|
||||||
|
//
|
||||||
|
// The least significant bit is used for the mutex (TTAS spinlock).
|
||||||
|
topHashMutex uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type rangeEntry struct {
|
||||||
|
key unsafe.Pointer
|
||||||
|
value unsafe.Pointer
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapConfig defines configurable Map/MapOf options.
|
||||||
|
type MapConfig struct {
|
||||||
|
sizeHint int
|
||||||
|
growOnly bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithPresize configures new Map/MapOf instance with capacity enough
|
||||||
|
// to hold sizeHint entries. The capacity is treated as the minimal
|
||||||
|
// capacity meaning that the underlying hash table will never shrink
|
||||||
|
// to a smaller capacity. If sizeHint is zero or negative, the value
|
||||||
|
// is ignored.
|
||||||
|
func WithPresize(sizeHint int) func(*MapConfig) {
|
||||||
|
return func(c *MapConfig) {
|
||||||
|
c.sizeHint = sizeHint
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithGrowOnly configures new Map/MapOf instance to be grow-only.
|
||||||
|
// This means that the underlying hash table grows in capacity when
|
||||||
|
// new keys are added, but does not shrink when keys are deleted.
|
||||||
|
// The only exception to this rule is the Clear method which
|
||||||
|
// shrinks the hash table back to the initial capacity.
|
||||||
|
func WithGrowOnly() func(*MapConfig) {
|
||||||
|
return func(c *MapConfig) {
|
||||||
|
c.growOnly = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMap creates a new Map instance configured with the given
|
||||||
|
// options.
|
||||||
|
func NewMap(options ...func(*MapConfig)) *Map {
|
||||||
|
c := &MapConfig{
|
||||||
|
sizeHint: defaultMinMapTableLen * entriesPerMapBucket,
|
||||||
|
}
|
||||||
|
for _, o := range options {
|
||||||
|
o(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
m := &Map{}
|
||||||
|
m.resizeCond = *sync.NewCond(&m.resizeMu)
|
||||||
|
var table *mapTable
|
||||||
|
if c.sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
|
||||||
|
table = newMapTable(defaultMinMapTableLen)
|
||||||
|
} else {
|
||||||
|
tableLen := nextPowOf2(uint32((float64(c.sizeHint) / entriesPerMapBucket) / mapLoadFactor))
|
||||||
|
table = newMapTable(int(tableLen))
|
||||||
|
}
|
||||||
|
m.minTableLen = len(table.buckets)
|
||||||
|
m.growOnly = c.growOnly
|
||||||
|
atomic.StorePointer(&m.table, unsafe.Pointer(table))
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMapPresized creates a new Map instance with capacity enough to hold
|
||||||
|
// sizeHint entries. The capacity is treated as the minimal capacity
|
||||||
|
// meaning that the underlying hash table will never shrink to
|
||||||
|
// a smaller capacity. If sizeHint is zero or negative, the value
|
||||||
|
// is ignored.
|
||||||
|
//
|
||||||
|
// Deprecated: use NewMap in combination with WithPresize.
|
||||||
|
func NewMapPresized(sizeHint int) *Map {
|
||||||
|
return NewMap(WithPresize(sizeHint))
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMapTable(minTableLen int) *mapTable {
|
||||||
|
buckets := make([]bucketPadded, minTableLen)
|
||||||
|
counterLen := minTableLen >> 10
|
||||||
|
if counterLen < minMapCounterLen {
|
||||||
|
counterLen = minMapCounterLen
|
||||||
|
} else if counterLen > maxMapCounterLen {
|
||||||
|
counterLen = maxMapCounterLen
|
||||||
|
}
|
||||||
|
counter := make([]counterStripe, counterLen)
|
||||||
|
t := &mapTable{
|
||||||
|
buckets: buckets,
|
||||||
|
size: counter,
|
||||||
|
seed: makeSeed(),
|
||||||
|
}
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToPlainMap returns a native map with a copy of xsync Map's
|
||||||
|
// contents. The copied xsync Map should not be modified while
|
||||||
|
// this call is made. If the copied Map is modified, the copying
|
||||||
|
// behavior is the same as in the Range method.
|
||||||
|
func ToPlainMap(m *Map) map[string]interface{} {
|
||||||
|
pm := make(map[string]interface{})
|
||||||
|
if m != nil {
|
||||||
|
m.Range(func(key string, value interface{}) bool {
|
||||||
|
pm[key] = value
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return pm
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load returns the value stored in the map for a key, or nil if no
|
||||||
|
// value is present.
|
||||||
|
// The ok result indicates whether value was found in the map.
|
||||||
|
func (m *Map) Load(key string) (value interface{}, ok bool) {
|
||||||
|
table := (*mapTable)(atomic.LoadPointer(&m.table))
|
||||||
|
hash := hashString(key, table.seed)
|
||||||
|
bidx := uint64(len(table.buckets)-1) & hash
|
||||||
|
b := &table.buckets[bidx]
|
||||||
|
for {
|
||||||
|
topHashes := atomic.LoadUint64(&b.topHashMutex)
|
||||||
|
for i := 0; i < entriesPerMapBucket; i++ {
|
||||||
|
if !topHashMatch(hash, topHashes, i) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
atomic_snapshot:
|
||||||
|
// Start atomic snapshot.
|
||||||
|
vp := atomic.LoadPointer(&b.values[i])
|
||||||
|
kp := atomic.LoadPointer(&b.keys[i])
|
||||||
|
if kp != nil && vp != nil {
|
||||||
|
if key == derefKey(kp) {
|
||||||
|
if uintptr(vp) == uintptr(atomic.LoadPointer(&b.values[i])) {
|
||||||
|
// Atomic snapshot succeeded.
|
||||||
|
return derefValue(vp), true
|
||||||
|
}
|
||||||
|
// Concurrent update/remove. Go for another spin.
|
||||||
|
goto atomic_snapshot
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
bptr := atomic.LoadPointer(&b.next)
|
||||||
|
if bptr == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b = (*bucketPadded)(bptr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store sets the value for a key.
|
||||||
|
func (m *Map) Store(key string, value interface{}) {
|
||||||
|
m.doCompute(
|
||||||
|
key,
|
||||||
|
func(interface{}, bool) (interface{}, bool) {
|
||||||
|
return value, false
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadOrStore returns the existing value for the key if present.
|
||||||
|
// Otherwise, it stores and returns the given value.
|
||||||
|
// The loaded result is true if the value was loaded, false if stored.
|
||||||
|
func (m *Map) LoadOrStore(key string, value interface{}) (actual interface{}, loaded bool) {
|
||||||
|
return m.doCompute(
|
||||||
|
key,
|
||||||
|
func(interface{}, bool) (interface{}, bool) {
|
||||||
|
return value, false
|
||||||
|
},
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadAndStore returns the existing value for the key if present,
|
||||||
|
// while setting the new value for the key.
|
||||||
|
// It stores the new value and returns the existing one, if present.
|
||||||
|
// The loaded result is true if the existing value was loaded,
|
||||||
|
// false otherwise.
|
||||||
|
func (m *Map) LoadAndStore(key string, value interface{}) (actual interface{}, loaded bool) {
|
||||||
|
return m.doCompute(
|
||||||
|
key,
|
||||||
|
func(interface{}, bool) (interface{}, bool) {
|
||||||
|
return value, false
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadOrCompute returns the existing value for the key if present.
|
||||||
|
// Otherwise, it computes the value using the provided function, and
|
||||||
|
// then stores and returns the computed value. The loaded result is
|
||||||
|
// true if the value was loaded, false if computed.
|
||||||
|
//
|
||||||
|
// This call locks a hash table bucket while the compute function
|
||||||
|
// is executed. It means that modifications on other entries in
|
||||||
|
// the bucket will be blocked until the valueFn executes. Consider
|
||||||
|
// this when the function includes long-running operations.
|
||||||
|
func (m *Map) LoadOrCompute(key string, valueFn func() interface{}) (actual interface{}, loaded bool) {
|
||||||
|
return m.doCompute(
|
||||||
|
key,
|
||||||
|
func(interface{}, bool) (interface{}, bool) {
|
||||||
|
return valueFn(), false
|
||||||
|
},
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadOrTryCompute returns the existing value for the key if present.
|
||||||
|
// Otherwise, it tries to compute the value using the provided function
|
||||||
|
// and, if successful, stores and returns the computed value. The loaded
|
||||||
|
// result is true if the value was loaded, or false if computed (whether
|
||||||
|
// successfully or not). If the compute attempt was cancelled (due to an
|
||||||
|
// error, for example), a nil value will be returned.
|
||||||
|
//
|
||||||
|
// This call locks a hash table bucket while the compute function
|
||||||
|
// is executed. It means that modifications on other entries in
|
||||||
|
// the bucket will be blocked until the valueFn executes. Consider
|
||||||
|
// this when the function includes long-running operations.
|
||||||
|
func (m *Map) LoadOrTryCompute(
|
||||||
|
key string,
|
||||||
|
valueFn func() (newValue interface{}, cancel bool),
|
||||||
|
) (value interface{}, loaded bool) {
|
||||||
|
return m.doCompute(
|
||||||
|
key,
|
||||||
|
func(interface{}, bool) (interface{}, bool) {
|
||||||
|
nv, c := valueFn()
|
||||||
|
if !c {
|
||||||
|
return nv, false
|
||||||
|
}
|
||||||
|
return nil, true
|
||||||
|
},
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compute either sets the computed new value for the key or deletes
|
||||||
|
// the value for the key. When the delete result of the valueFn function
|
||||||
|
// is set to true, the value will be deleted, if it exists. When delete
|
||||||
|
// is set to false, the value is updated to the newValue.
|
||||||
|
// The ok result indicates whether value was computed and stored, thus, is
|
||||||
|
// present in the map. The actual result contains the new value in cases where
|
||||||
|
// the value was computed and stored. See the example for a few use cases.
|
||||||
|
//
|
||||||
|
// This call locks a hash table bucket while the compute function
|
||||||
|
// is executed. It means that modifications on other entries in
|
||||||
|
// the bucket will be blocked until the valueFn executes. Consider
|
||||||
|
// this when the function includes long-running operations.
|
||||||
|
func (m *Map) Compute(
|
||||||
|
key string,
|
||||||
|
valueFn func(oldValue interface{}, loaded bool) (newValue interface{}, delete bool),
|
||||||
|
) (actual interface{}, ok bool) {
|
||||||
|
return m.doCompute(key, valueFn, false, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadAndDelete deletes the value for a key, returning the previous
|
||||||
|
// value if any. The loaded result reports whether the key was
|
||||||
|
// present.
|
||||||
|
func (m *Map) LoadAndDelete(key string) (value interface{}, loaded bool) {
|
||||||
|
return m.doCompute(
|
||||||
|
key,
|
||||||
|
func(value interface{}, loaded bool) (interface{}, bool) {
|
||||||
|
return value, true
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete deletes the value for a key.
|
||||||
|
func (m *Map) Delete(key string) {
|
||||||
|
m.doCompute(
|
||||||
|
key,
|
||||||
|
func(value interface{}, loaded bool) (interface{}, bool) {
|
||||||
|
return value, true
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Map) doCompute(
|
||||||
|
key string,
|
||||||
|
valueFn func(oldValue interface{}, loaded bool) (interface{}, bool),
|
||||||
|
loadIfExists, computeOnly bool,
|
||||||
|
) (interface{}, bool) {
|
||||||
|
// Read-only path.
|
||||||
|
if loadIfExists {
|
||||||
|
if v, ok := m.Load(key); ok {
|
||||||
|
return v, !computeOnly
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Write path.
|
||||||
|
for {
|
||||||
|
compute_attempt:
|
||||||
|
var (
|
||||||
|
emptyb *bucketPadded
|
||||||
|
emptyidx int
|
||||||
|
hintNonEmpty int
|
||||||
|
)
|
||||||
|
table := (*mapTable)(atomic.LoadPointer(&m.table))
|
||||||
|
tableLen := len(table.buckets)
|
||||||
|
hash := hashString(key, table.seed)
|
||||||
|
bidx := uint64(len(table.buckets)-1) & hash
|
||||||
|
rootb := &table.buckets[bidx]
|
||||||
|
lockBucket(&rootb.topHashMutex)
|
||||||
|
// The following two checks must go in reverse to what's
|
||||||
|
// in the resize method.
|
||||||
|
if m.resizeInProgress() {
|
||||||
|
// Resize is in progress. Wait, then go for another attempt.
|
||||||
|
unlockBucket(&rootb.topHashMutex)
|
||||||
|
m.waitForResize()
|
||||||
|
goto compute_attempt
|
||||||
|
}
|
||||||
|
if m.newerTableExists(table) {
|
||||||
|
// Someone resized the table. Go for another attempt.
|
||||||
|
unlockBucket(&rootb.topHashMutex)
|
||||||
|
goto compute_attempt
|
||||||
|
}
|
||||||
|
b := rootb
|
||||||
|
for {
|
||||||
|
topHashes := atomic.LoadUint64(&b.topHashMutex)
|
||||||
|
for i := 0; i < entriesPerMapBucket; i++ {
|
||||||
|
if b.keys[i] == nil {
|
||||||
|
if emptyb == nil {
|
||||||
|
emptyb = b
|
||||||
|
emptyidx = i
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !topHashMatch(hash, topHashes, i) {
|
||||||
|
hintNonEmpty++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if key == derefKey(b.keys[i]) {
|
||||||
|
vp := b.values[i]
|
||||||
|
if loadIfExists {
|
||||||
|
unlockBucket(&rootb.topHashMutex)
|
||||||
|
return derefValue(vp), !computeOnly
|
||||||
|
}
|
||||||
|
// In-place update/delete.
|
||||||
|
// We get a copy of the value via an interface{} on each call,
|
||||||
|
// thus the live value pointers are unique. Otherwise atomic
|
||||||
|
// snapshot won't be correct in case of multiple Store calls
|
||||||
|
// using the same value.
|
||||||
|
oldValue := derefValue(vp)
|
||||||
|
newValue, del := valueFn(oldValue, true)
|
||||||
|
if del {
|
||||||
|
// Deletion.
|
||||||
|
// First we update the value, then the key.
|
||||||
|
// This is important for atomic snapshot states.
|
||||||
|
atomic.StoreUint64(&b.topHashMutex, eraseTopHash(topHashes, i))
|
||||||
|
atomic.StorePointer(&b.values[i], nil)
|
||||||
|
atomic.StorePointer(&b.keys[i], nil)
|
||||||
|
leftEmpty := false
|
||||||
|
if hintNonEmpty == 0 {
|
||||||
|
leftEmpty = isEmptyBucket(b)
|
||||||
|
}
|
||||||
|
unlockBucket(&rootb.topHashMutex)
|
||||||
|
table.addSize(bidx, -1)
|
||||||
|
// Might need to shrink the table.
|
||||||
|
if leftEmpty {
|
||||||
|
m.resize(table, mapShrinkHint)
|
||||||
|
}
|
||||||
|
return oldValue, !computeOnly
|
||||||
|
}
|
||||||
|
nvp := unsafe.Pointer(&newValue)
|
||||||
|
if assertionsEnabled && vp == nvp {
|
||||||
|
panic("non-unique value pointer")
|
||||||
|
}
|
||||||
|
atomic.StorePointer(&b.values[i], nvp)
|
||||||
|
unlockBucket(&rootb.topHashMutex)
|
||||||
|
if computeOnly {
|
||||||
|
// Compute expects the new value to be returned.
|
||||||
|
return newValue, true
|
||||||
|
}
|
||||||
|
// LoadAndStore expects the old value to be returned.
|
||||||
|
return oldValue, true
|
||||||
|
}
|
||||||
|
hintNonEmpty++
|
||||||
|
}
|
||||||
|
if b.next == nil {
|
||||||
|
if emptyb != nil {
|
||||||
|
// Insertion into an existing bucket.
|
||||||
|
var zeroV interface{}
|
||||||
|
newValue, del := valueFn(zeroV, false)
|
||||||
|
if del {
|
||||||
|
unlockBucket(&rootb.topHashMutex)
|
||||||
|
return zeroV, false
|
||||||
|
}
|
||||||
|
// First we update the value, then the key.
|
||||||
|
// This is important for atomic snapshot states.
|
||||||
|
topHashes = atomic.LoadUint64(&emptyb.topHashMutex)
|
||||||
|
atomic.StoreUint64(&emptyb.topHashMutex, storeTopHash(hash, topHashes, emptyidx))
|
||||||
|
atomic.StorePointer(&emptyb.values[emptyidx], unsafe.Pointer(&newValue))
|
||||||
|
atomic.StorePointer(&emptyb.keys[emptyidx], unsafe.Pointer(&key))
|
||||||
|
unlockBucket(&rootb.topHashMutex)
|
||||||
|
table.addSize(bidx, 1)
|
||||||
|
return newValue, computeOnly
|
||||||
|
}
|
||||||
|
growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor
|
||||||
|
if table.sumSize() > int64(growThreshold) {
|
||||||
|
// Need to grow the table. Then go for another attempt.
|
||||||
|
unlockBucket(&rootb.topHashMutex)
|
||||||
|
m.resize(table, mapGrowHint)
|
||||||
|
goto compute_attempt
|
||||||
|
}
|
||||||
|
// Insertion into a new bucket.
|
||||||
|
var zeroV interface{}
|
||||||
|
newValue, del := valueFn(zeroV, false)
|
||||||
|
if del {
|
||||||
|
unlockBucket(&rootb.topHashMutex)
|
||||||
|
return newValue, false
|
||||||
|
}
|
||||||
|
// Create and append a bucket.
|
||||||
|
newb := new(bucketPadded)
|
||||||
|
newb.keys[0] = unsafe.Pointer(&key)
|
||||||
|
newb.values[0] = unsafe.Pointer(&newValue)
|
||||||
|
newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0)
|
||||||
|
atomic.StorePointer(&b.next, unsafe.Pointer(newb))
|
||||||
|
unlockBucket(&rootb.topHashMutex)
|
||||||
|
table.addSize(bidx, 1)
|
||||||
|
return newValue, computeOnly
|
||||||
|
}
|
||||||
|
b = (*bucketPadded)(b.next)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Map) newerTableExists(table *mapTable) bool {
|
||||||
|
curTablePtr := atomic.LoadPointer(&m.table)
|
||||||
|
return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Map) resizeInProgress() bool {
|
||||||
|
return atomic.LoadInt64(&m.resizing) == 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Map) waitForResize() {
|
||||||
|
m.resizeMu.Lock()
|
||||||
|
for m.resizeInProgress() {
|
||||||
|
m.resizeCond.Wait()
|
||||||
|
}
|
||||||
|
m.resizeMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Map) resize(knownTable *mapTable, hint mapResizeHint) {
|
||||||
|
knownTableLen := len(knownTable.buckets)
|
||||||
|
// Fast path for shrink attempts.
|
||||||
|
if hint == mapShrinkHint {
|
||||||
|
if m.growOnly ||
|
||||||
|
m.minTableLen == knownTableLen ||
|
||||||
|
knownTable.sumSize() > int64((knownTableLen*entriesPerMapBucket)/mapShrinkFraction) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Slow path.
|
||||||
|
if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
|
||||||
|
// Someone else started resize. Wait for it to finish.
|
||||||
|
m.waitForResize()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var newTable *mapTable
|
||||||
|
table := (*mapTable)(atomic.LoadPointer(&m.table))
|
||||||
|
tableLen := len(table.buckets)
|
||||||
|
switch hint {
|
||||||
|
case mapGrowHint:
|
||||||
|
// Grow the table with factor of 2.
|
||||||
|
atomic.AddInt64(&m.totalGrowths, 1)
|
||||||
|
newTable = newMapTable(tableLen << 1)
|
||||||
|
case mapShrinkHint:
|
||||||
|
shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
|
||||||
|
if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
|
||||||
|
// Shrink the table with factor of 2.
|
||||||
|
atomic.AddInt64(&m.totalShrinks, 1)
|
||||||
|
newTable = newMapTable(tableLen >> 1)
|
||||||
|
} else {
|
||||||
|
// No need to shrink. Wake up all waiters and give up.
|
||||||
|
m.resizeMu.Lock()
|
||||||
|
atomic.StoreInt64(&m.resizing, 0)
|
||||||
|
m.resizeCond.Broadcast()
|
||||||
|
m.resizeMu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case mapClearHint:
|
||||||
|
newTable = newMapTable(m.minTableLen)
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("unexpected resize hint: %d", hint))
|
||||||
|
}
|
||||||
|
// Copy the data only if we're not clearing the map.
|
||||||
|
if hint != mapClearHint {
|
||||||
|
for i := 0; i < tableLen; i++ {
|
||||||
|
copied := copyBucket(&table.buckets[i], newTable)
|
||||||
|
newTable.addSizePlain(uint64(i), copied)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Publish the new table and wake up all waiters.
|
||||||
|
atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
|
||||||
|
m.resizeMu.Lock()
|
||||||
|
atomic.StoreInt64(&m.resizing, 0)
|
||||||
|
m.resizeCond.Broadcast()
|
||||||
|
m.resizeMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func copyBucket(b *bucketPadded, destTable *mapTable) (copied int) {
|
||||||
|
rootb := b
|
||||||
|
lockBucket(&rootb.topHashMutex)
|
||||||
|
for {
|
||||||
|
for i := 0; i < entriesPerMapBucket; i++ {
|
||||||
|
if b.keys[i] != nil {
|
||||||
|
k := derefKey(b.keys[i])
|
||||||
|
hash := hashString(k, destTable.seed)
|
||||||
|
bidx := uint64(len(destTable.buckets)-1) & hash
|
||||||
|
destb := &destTable.buckets[bidx]
|
||||||
|
appendToBucket(hash, b.keys[i], b.values[i], destb)
|
||||||
|
copied++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if b.next == nil {
|
||||||
|
unlockBucket(&rootb.topHashMutex)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b = (*bucketPadded)(b.next)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func appendToBucket(hash uint64, keyPtr, valPtr unsafe.Pointer, b *bucketPadded) {
|
||||||
|
for {
|
||||||
|
for i := 0; i < entriesPerMapBucket; i++ {
|
||||||
|
if b.keys[i] == nil {
|
||||||
|
b.keys[i] = keyPtr
|
||||||
|
b.values[i] = valPtr
|
||||||
|
b.topHashMutex = storeTopHash(hash, b.topHashMutex, i)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if b.next == nil {
|
||||||
|
newb := new(bucketPadded)
|
||||||
|
newb.keys[0] = keyPtr
|
||||||
|
newb.values[0] = valPtr
|
||||||
|
newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0)
|
||||||
|
b.next = unsafe.Pointer(newb)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b = (*bucketPadded)(b.next)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isEmptyBucket(rootb *bucketPadded) bool {
|
||||||
|
b := rootb
|
||||||
|
for {
|
||||||
|
for i := 0; i < entriesPerMapBucket; i++ {
|
||||||
|
if b.keys[i] != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if b.next == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
b = (*bucketPadded)(b.next)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Range calls f sequentially for each key and value present in the
|
||||||
|
// map. If f returns false, range stops the iteration.
|
||||||
|
//
|
||||||
|
// Range does not necessarily correspond to any consistent snapshot
|
||||||
|
// of the Map's contents: no key will be visited more than once, but
|
||||||
|
// if the value for any key is stored or deleted concurrently, Range
|
||||||
|
// may reflect any mapping for that key from any point during the
|
||||||
|
// Range call.
|
||||||
|
//
|
||||||
|
// It is safe to modify the map while iterating it, including entry
|
||||||
|
// creation, modification and deletion. However, the concurrent
|
||||||
|
// modification rules apply, i.e. the changes may not be reflected
|
||||||
|
// in the subsequently iterated entries.
|
||||||
|
func (m *Map) Range(f func(key string, value interface{}) bool) {
|
||||||
|
var zeroEntry rangeEntry
|
||||||
|
// Pre-allocate array big enough to fit entries for most hash tables.
|
||||||
|
bentries := make([]rangeEntry, 0, 16*entriesPerMapBucket)
|
||||||
|
tablep := atomic.LoadPointer(&m.table)
|
||||||
|
table := *(*mapTable)(tablep)
|
||||||
|
for i := range table.buckets {
|
||||||
|
rootb := &table.buckets[i]
|
||||||
|
b := rootb
|
||||||
|
// Prevent concurrent modifications and copy all entries into
|
||||||
|
// the intermediate slice.
|
||||||
|
lockBucket(&rootb.topHashMutex)
|
||||||
|
for {
|
||||||
|
for i := 0; i < entriesPerMapBucket; i++ {
|
||||||
|
if b.keys[i] != nil {
|
||||||
|
bentries = append(bentries, rangeEntry{
|
||||||
|
key: b.keys[i],
|
||||||
|
value: b.values[i],
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if b.next == nil {
|
||||||
|
unlockBucket(&rootb.topHashMutex)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
b = (*bucketPadded)(b.next)
|
||||||
|
}
|
||||||
|
// Call the function for all copied entries.
|
||||||
|
for j := range bentries {
|
||||||
|
k := derefKey(bentries[j].key)
|
||||||
|
v := derefValue(bentries[j].value)
|
||||||
|
if !f(k, v) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Remove the reference to avoid preventing the copied
|
||||||
|
// entries from being GCed until this method finishes.
|
||||||
|
bentries[j] = zeroEntry
|
||||||
|
}
|
||||||
|
bentries = bentries[:0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear deletes all keys and values currently stored in the map.
|
||||||
|
func (m *Map) Clear() {
|
||||||
|
table := (*mapTable)(atomic.LoadPointer(&m.table))
|
||||||
|
m.resize(table, mapClearHint)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size returns current size of the map.
|
||||||
|
func (m *Map) Size() int {
|
||||||
|
table := (*mapTable)(atomic.LoadPointer(&m.table))
|
||||||
|
return int(table.sumSize())
|
||||||
|
}
|
||||||
|
|
||||||
|
func derefKey(keyPtr unsafe.Pointer) string {
|
||||||
|
return *(*string)(keyPtr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func derefValue(valuePtr unsafe.Pointer) interface{} {
|
||||||
|
return *(*interface{})(valuePtr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func lockBucket(mu *uint64) {
|
||||||
|
for {
|
||||||
|
var v uint64
|
||||||
|
for {
|
||||||
|
v = atomic.LoadUint64(mu)
|
||||||
|
if v&1 != 1 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
runtime.Gosched()
|
||||||
|
}
|
||||||
|
if atomic.CompareAndSwapUint64(mu, v, v|1) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
runtime.Gosched()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func unlockBucket(mu *uint64) {
|
||||||
|
v := atomic.LoadUint64(mu)
|
||||||
|
atomic.StoreUint64(mu, v&^1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func topHashMatch(hash, topHashes uint64, idx int) bool {
|
||||||
|
if topHashes&(1<<(idx+1)) == 0 {
|
||||||
|
// Entry is not present.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
hash = hash & topHashMask
|
||||||
|
topHashes = (topHashes & topHashEntryMasks[idx]) << (20 * idx)
|
||||||
|
return hash == topHashes
|
||||||
|
}
|
||||||
|
|
||||||
|
func storeTopHash(hash, topHashes uint64, idx int) uint64 {
|
||||||
|
// Zero out top hash at idx.
|
||||||
|
topHashes = topHashes &^ topHashEntryMasks[idx]
|
||||||
|
// Chop top 20 MSBs of the given hash and position them at idx.
|
||||||
|
hash = (hash & topHashMask) >> (20 * idx)
|
||||||
|
// Store the MSBs.
|
||||||
|
topHashes = topHashes | hash
|
||||||
|
// Mark the entry as present.
|
||||||
|
return topHashes | (1 << (idx + 1))
|
||||||
|
}
|
||||||
|
|
||||||
|
func eraseTopHash(topHashes uint64, idx int) uint64 {
|
||||||
|
return topHashes &^ (1 << (idx + 1))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (table *mapTable) addSize(bucketIdx uint64, delta int) {
|
||||||
|
cidx := uint64(len(table.size)-1) & bucketIdx
|
||||||
|
atomic.AddInt64(&table.size[cidx].c, int64(delta))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (table *mapTable) addSizePlain(bucketIdx uint64, delta int) {
|
||||||
|
cidx := uint64(len(table.size)-1) & bucketIdx
|
||||||
|
table.size[cidx].c += int64(delta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (table *mapTable) sumSize() int64 {
|
||||||
|
sum := int64(0)
|
||||||
|
for i := range table.size {
|
||||||
|
sum += atomic.LoadInt64(&table.size[i].c)
|
||||||
|
}
|
||||||
|
return sum
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapStats is Map/MapOf statistics.
|
||||||
|
//
|
||||||
|
// Warning: map statistics are intended to be used for diagnostic
|
||||||
|
// purposes, not for production code. This means that breaking changes
|
||||||
|
// may be introduced into this struct even between minor releases.
|
||||||
|
type MapStats struct {
|
||||||
|
// RootBuckets is the number of root buckets in the hash table.
|
||||||
|
// Each bucket holds a few entries.
|
||||||
|
RootBuckets int
|
||||||
|
// TotalBuckets is the total number of buckets in the hash table,
|
||||||
|
// including root and their chained buckets. Each bucket holds
|
||||||
|
// a few entries.
|
||||||
|
TotalBuckets int
|
||||||
|
// EmptyBuckets is the number of buckets that hold no entries.
|
||||||
|
EmptyBuckets int
|
||||||
|
// Capacity is the Map/MapOf capacity, i.e. the total number of
|
||||||
|
// entries that all buckets can physically hold. This number
|
||||||
|
// does not consider the load factor.
|
||||||
|
Capacity int
|
||||||
|
// Size is the exact number of entries stored in the map.
|
||||||
|
Size int
|
||||||
|
// Counter is the number of entries stored in the map according
|
||||||
|
// to the internal atomic counter. In case of concurrent map
|
||||||
|
// modifications this number may be different from Size.
|
||||||
|
Counter int
|
||||||
|
// CounterLen is the number of internal atomic counter stripes.
|
||||||
|
// This number may grow with the map capacity to improve
|
||||||
|
// multithreaded scalability.
|
||||||
|
CounterLen int
|
||||||
|
// MinEntries is the minimum number of entries per a chain of
|
||||||
|
// buckets, i.e. a root bucket and its chained buckets.
|
||||||
|
MinEntries int
|
||||||
|
// MaxEntries is the maximum number of entries per a chain of
|
||||||
|
// buckets, i.e. a root bucket and its chained buckets.
|
||||||
|
MaxEntries int
|
||||||
|
// TotalGrowths is the number of times the hash table grew.
|
||||||
|
TotalGrowths int64
|
||||||
|
// TotalShrinks is the number of times the hash table shrank.
|
||||||
|
TotalShrinks int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToString returns string representation of map stats.
|
||||||
|
func (s *MapStats) ToString() string {
|
||||||
|
var sb strings.Builder
|
||||||
|
sb.WriteString("MapStats{\n")
|
||||||
|
sb.WriteString(fmt.Sprintf("RootBuckets: %d\n", s.RootBuckets))
|
||||||
|
sb.WriteString(fmt.Sprintf("TotalBuckets: %d\n", s.TotalBuckets))
|
||||||
|
sb.WriteString(fmt.Sprintf("EmptyBuckets: %d\n", s.EmptyBuckets))
|
||||||
|
sb.WriteString(fmt.Sprintf("Capacity: %d\n", s.Capacity))
|
||||||
|
sb.WriteString(fmt.Sprintf("Size: %d\n", s.Size))
|
||||||
|
sb.WriteString(fmt.Sprintf("Counter: %d\n", s.Counter))
|
||||||
|
sb.WriteString(fmt.Sprintf("CounterLen: %d\n", s.CounterLen))
|
||||||
|
sb.WriteString(fmt.Sprintf("MinEntries: %d\n", s.MinEntries))
|
||||||
|
sb.WriteString(fmt.Sprintf("MaxEntries: %d\n", s.MaxEntries))
|
||||||
|
sb.WriteString(fmt.Sprintf("TotalGrowths: %d\n", s.TotalGrowths))
|
||||||
|
sb.WriteString(fmt.Sprintf("TotalShrinks: %d\n", s.TotalShrinks))
|
||||||
|
sb.WriteString("}\n")
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stats returns statistics for the Map. Just like other map
|
||||||
|
// methods, this one is thread-safe. Yet it's an O(N) operation,
|
||||||
|
// so it should be used only for diagnostics or debugging purposes.
|
||||||
|
func (m *Map) Stats() MapStats {
|
||||||
|
stats := MapStats{
|
||||||
|
TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
|
||||||
|
TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
|
||||||
|
MinEntries: math.MaxInt32,
|
||||||
|
}
|
||||||
|
table := (*mapTable)(atomic.LoadPointer(&m.table))
|
||||||
|
stats.RootBuckets = len(table.buckets)
|
||||||
|
stats.Counter = int(table.sumSize())
|
||||||
|
stats.CounterLen = len(table.size)
|
||||||
|
for i := range table.buckets {
|
||||||
|
nentries := 0
|
||||||
|
b := &table.buckets[i]
|
||||||
|
stats.TotalBuckets++
|
||||||
|
for {
|
||||||
|
nentriesLocal := 0
|
||||||
|
stats.Capacity += entriesPerMapBucket
|
||||||
|
for i := 0; i < entriesPerMapBucket; i++ {
|
||||||
|
if atomic.LoadPointer(&b.keys[i]) != nil {
|
||||||
|
stats.Size++
|
||||||
|
nentriesLocal++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
nentries += nentriesLocal
|
||||||
|
if nentriesLocal == 0 {
|
||||||
|
stats.EmptyBuckets++
|
||||||
|
}
|
||||||
|
if b.next == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
b = (*bucketPadded)(atomic.LoadPointer(&b.next))
|
||||||
|
stats.TotalBuckets++
|
||||||
|
}
|
||||||
|
if nentries < stats.MinEntries {
|
||||||
|
stats.MinEntries = nentries
|
||||||
|
}
|
||||||
|
if nentries > stats.MaxEntries {
|
||||||
|
stats.MaxEntries = nentries
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return stats
|
||||||
|
}
|
||||||
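Before the generic variant that follows, a minimal usage sketch of the string-keyed Map defined above (assuming the package is imported as `xsync`); note that Compute runs its callback while the target bucket is locked, so the callback should stay short.

```go
m := xsync.NewMap()
m.Store("hits", 1)

// Atomically bump the stored value; the callback sees the old value.
m.Compute("hits", func(old interface{}, loaded bool) (interface{}, bool) {
	n := 0
	if loaded {
		n = old.(int)
	}
	return n + 1, false // false: keep the entry rather than delete it
})

if v, ok := m.Load("hits"); ok {
	fmt.Println(v) // 2
}
```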
738 vendor/github.com/puzpuzpuz/xsync/v3/mapof.go (generated, vendored, new file)
@@ -0,0 +1,738 @@
|
|||||||
|
package xsync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// number of MapOf entries per bucket; 5 entries lead to size of 64B
|
||||||
|
// (one cache line) on 64-bit machines
|
||||||
|
entriesPerMapOfBucket = 5
|
||||||
|
defaultMeta uint64 = 0x8080808080808080
|
||||||
|
metaMask uint64 = 0xffffffffff
|
||||||
|
defaultMetaMasked uint64 = defaultMeta & metaMask
|
||||||
|
emptyMetaSlot uint8 = 0x80
|
||||||
|
)
|
||||||
|
|
||||||
|
// MapOf is like a Go map[K]V but is safe for concurrent
|
||||||
|
// use by multiple goroutines without additional locking or
|
||||||
|
// coordination. It follows the interface of sync.Map with
|
||||||
|
// a number of valuable extensions like Compute or Size.
|
||||||
|
//
|
||||||
|
// A MapOf must not be copied after first use.
|
||||||
|
//
|
||||||
|
// MapOf uses a modified version of Cache-Line Hash Table (CLHT)
|
||||||
|
// data structure: https://github.com/LPD-EPFL/CLHT
|
||||||
|
//
|
||||||
|
// CLHT is built around idea to organize the hash table in
|
||||||
|
// cache-line-sized buckets, so that on all modern CPUs update
|
||||||
|
// operations complete with at most one cache-line transfer.
|
||||||
|
// Also, Get operations involve no write to memory, as well as no
|
||||||
|
// mutexes or any other sort of locks. Due to this design, in all
|
||||||
|
// considered scenarios MapOf outperforms sync.Map.
|
||||||
|
//
|
||||||
|
// MapOf also borrows ideas from Java's j.u.c.ConcurrentHashMap
|
||||||
|
// (immutable K/V pair structs instead of atomic snapshots)
|
||||||
|
// and C++'s absl::flat_hash_map (meta memory and SWAR-based
|
||||||
|
// lookups).
|
||||||
|
type MapOf[K comparable, V any] struct {
|
||||||
|
totalGrowths int64
|
||||||
|
totalShrinks int64
|
||||||
|
resizing int64 // resize in progress flag; updated atomically
|
||||||
|
resizeMu sync.Mutex // only used along with resizeCond
|
||||||
|
resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications)
|
||||||
|
table unsafe.Pointer // *mapOfTable
|
||||||
|
hasher func(K, uint64) uint64
|
||||||
|
minTableLen int
|
||||||
|
growOnly bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type mapOfTable[K comparable, V any] struct {
|
||||||
|
buckets []bucketOfPadded
|
||||||
|
// striped counter for number of table entries;
|
||||||
|
// used to determine if a table shrinking is needed
|
||||||
|
// occupies min(buckets_memory/1024, 64KB) of memory
|
||||||
|
size []counterStripe
|
||||||
|
seed uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// bucketOfPadded is a CL-sized map bucket holding up to
|
||||||
|
// entriesPerMapOfBucket entries.
|
||||||
|
type bucketOfPadded struct {
|
||||||
|
//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
|
||||||
|
pad [cacheLineSize - unsafe.Sizeof(bucketOf{})]byte
|
||||||
|
bucketOf
|
||||||
|
}
|
||||||
|
|
||||||
|
type bucketOf struct {
|
||||||
|
meta uint64
|
||||||
|
entries [entriesPerMapOfBucket]unsafe.Pointer // *entryOf
|
||||||
|
next unsafe.Pointer // *bucketOfPadded
|
||||||
|
mu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// entryOf is an immutable map entry.
|
||||||
|
type entryOf[K comparable, V any] struct {
|
||||||
|
key K
|
||||||
|
value V
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMapOf creates a new MapOf instance configured with the given
|
||||||
|
// options.
|
||||||
|
func NewMapOf[K comparable, V any](options ...func(*MapConfig)) *MapOf[K, V] {
|
||||||
|
return NewMapOfWithHasher[K, V](defaultHasher[K](), options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMapOfWithHasher creates a new MapOf instance configured with
|
||||||
|
// the given hasher and options. The hash function is used instead
|
||||||
|
// of the built-in hash function configured when a map is created
|
||||||
|
// with the NewMapOf function.
|
||||||
|
func NewMapOfWithHasher[K comparable, V any](
|
||||||
|
hasher func(K, uint64) uint64,
|
||||||
|
options ...func(*MapConfig),
|
||||||
|
) *MapOf[K, V] {
|
||||||
|
c := &MapConfig{
|
||||||
|
sizeHint: defaultMinMapTableLen * entriesPerMapOfBucket,
|
||||||
|
}
|
||||||
|
for _, o := range options {
|
||||||
|
o(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
m := &MapOf[K, V]{}
|
||||||
|
m.resizeCond = *sync.NewCond(&m.resizeMu)
|
||||||
|
m.hasher = hasher
|
||||||
|
var table *mapOfTable[K, V]
|
||||||
|
if c.sizeHint <= defaultMinMapTableLen*entriesPerMapOfBucket {
|
||||||
|
table = newMapOfTable[K, V](defaultMinMapTableLen)
|
||||||
|
} else {
|
||||||
|
tableLen := nextPowOf2(uint32((float64(c.sizeHint) / entriesPerMapOfBucket) / mapLoadFactor))
|
||||||
|
table = newMapOfTable[K, V](int(tableLen))
|
||||||
|
}
|
||||||
|
m.minTableLen = len(table.buckets)
|
||||||
|
m.growOnly = c.growOnly
|
||||||
|
atomic.StorePointer(&m.table, unsafe.Pointer(table))
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMapOfPresized creates a new MapOf instance with capacity enough
|
||||||
|
// to hold sizeHint entries. The capacity is treated as the minimal capacity
|
||||||
|
// meaning that the underlying hash table will never shrink to
|
||||||
|
// a smaller capacity. If sizeHint is zero or negative, the value
|
||||||
|
// is ignored.
|
||||||
|
//
|
||||||
|
// Deprecated: use NewMapOf in combination with WithPresize.
|
||||||
|
func NewMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] {
|
||||||
|
return NewMapOf[K, V](WithPresize(sizeHint))
|
||||||
|
}
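As a quick orientation for the constructors above, a minimal sketch of creating and using the generic MapOf (hypothetical key/value types, assuming the package is imported as `xsync`):

```go
m := xsync.NewMapOf[string, int](xsync.WithPresize(1024))
m.Store("answer", 42)
if v, ok := m.Load("answer"); ok {
	fmt.Println(v) // 42
}
```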
|
||||||
|
|
||||||
|
func newMapOfTable[K comparable, V any](minTableLen int) *mapOfTable[K, V] {
|
||||||
|
buckets := make([]bucketOfPadded, minTableLen)
|
||||||
|
for i := range buckets {
|
||||||
|
buckets[i].meta = defaultMeta
|
||||||
|
}
|
||||||
|
counterLen := minTableLen >> 10
|
||||||
|
if counterLen < minMapCounterLen {
|
||||||
|
counterLen = minMapCounterLen
|
||||||
|
} else if counterLen > maxMapCounterLen {
|
||||||
|
counterLen = maxMapCounterLen
|
||||||
|
}
|
||||||
|
counter := make([]counterStripe, counterLen)
|
||||||
|
t := &mapOfTable[K, V]{
|
||||||
|
buckets: buckets,
|
||||||
|
size: counter,
|
||||||
|
seed: makeSeed(),
|
||||||
|
}
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToPlainMapOf returns a native map with a copy of xsync Map's
|
||||||
|
// contents. The copied xsync Map should not be modified while
|
||||||
|
// this call is made. If the copied Map is modified, the copying
|
||||||
|
// behavior is the same as in the Range method.
|
||||||
|
func ToPlainMapOf[K comparable, V any](m *MapOf[K, V]) map[K]V {
|
||||||
|
pm := make(map[K]V)
|
||||||
|
if m != nil {
|
||||||
|
m.Range(func(key K, value V) bool {
|
||||||
|
pm[key] = value
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return pm
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load returns the value stored in the map for a key, or zero value
|
||||||
|
// of type V if no value is present.
|
||||||
|
// The ok result indicates whether value was found in the map.
|
||||||
|
func (m *MapOf[K, V]) Load(key K) (value V, ok bool) {
|
||||||
|
table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
|
||||||
|
hash := m.hasher(key, table.seed)
|
||||||
|
h1 := h1(hash)
|
||||||
|
h2w := broadcast(h2(hash))
|
||||||
|
bidx := uint64(len(table.buckets)-1) & h1
|
||||||
|
b := &table.buckets[bidx]
|
||||||
|
for {
|
||||||
|
metaw := atomic.LoadUint64(&b.meta)
|
||||||
|
markedw := markZeroBytes(metaw^h2w) & metaMask
|
||||||
|
for markedw != 0 {
|
||||||
|
idx := firstMarkedByteIndex(markedw)
|
||||||
|
eptr := atomic.LoadPointer(&b.entries[idx])
|
||||||
|
if eptr != nil {
|
||||||
|
e := (*entryOf[K, V])(eptr)
|
||||||
|
if e.key == key {
|
||||||
|
return e.value, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
markedw &= markedw - 1
|
||||||
|
}
|
||||||
|
bptr := atomic.LoadPointer(&b.next)
|
||||||
|
if bptr == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b = (*bucketOfPadded)(bptr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store sets the value for a key.
|
||||||
|
func (m *MapOf[K, V]) Store(key K, value V) {
|
||||||
|
m.doCompute(
|
||||||
|
key,
|
||||||
|
func(V, bool) (V, bool) {
|
||||||
|
return value, false
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadOrStore returns the existing value for the key if present.
|
||||||
|
// Otherwise, it stores and returns the given value.
|
||||||
|
// The loaded result is true if the value was loaded, false if stored.
|
||||||
|
func (m *MapOf[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
|
||||||
|
return m.doCompute(
|
||||||
|
key,
|
||||||
|
func(V, bool) (V, bool) {
|
||||||
|
return value, false
|
||||||
|
},
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadAndStore returns the existing value for the key if present,
|
||||||
|
// while setting the new value for the key.
|
||||||
|
// It stores the new value and returns the existing one, if present.
|
||||||
|
// The loaded result is true if the existing value was loaded,
|
||||||
|
// false otherwise.
|
||||||
|
func (m *MapOf[K, V]) LoadAndStore(key K, value V) (actual V, loaded bool) {
|
||||||
|
return m.doCompute(
|
||||||
|
key,
|
||||||
|
func(V, bool) (V, bool) {
|
||||||
|
return value, false
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadOrCompute returns the existing value for the key if present.
|
||||||
|
// Otherwise, it computes the value using the provided function, and
|
||||||
|
// then stores and returns the computed value. The loaded result is
|
||||||
|
// true if the value was loaded, false if computed.
|
||||||
|
//
|
||||||
|
// This call locks a hash table bucket while the compute function
|
||||||
|
// is executed. It means that modifications on other entries in
|
||||||
|
// the bucket will be blocked until the valueFn executes. Consider
|
||||||
|
// this when the function includes long-running operations.
|
||||||
|
func (m *MapOf[K, V]) LoadOrCompute(key K, valueFn func() V) (actual V, loaded bool) {
|
||||||
|
return m.doCompute(
|
||||||
|
key,
|
||||||
|
func(V, bool) (V, bool) {
|
||||||
|
return valueFn(), false
|
||||||
|
},
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadOrTryCompute returns the existing value for the key if present.
|
||||||
|
// Otherwise, it tries to compute the value using the provided function
|
||||||
|
// and, if successful, stores and returns the computed value. The loaded
|
||||||
|
// result is true if the value was loaded, or false if computed (whether
|
||||||
|
// successfully or not). If the compute attempt was cancelled (due to an
|
||||||
|
// error, for example), a zero value of type V will be returned.
|
||||||
|
//
|
||||||
|
// This call locks a hash table bucket while the compute function
|
||||||
|
// is executed. It means that modifications on other entries in
|
||||||
|
// the bucket will be blocked until the valueFn executes. Consider
|
||||||
|
// this when the function includes long-running operations.
|
||||||
|
func (m *MapOf[K, V]) LoadOrTryCompute(
|
||||||
|
key K,
|
||||||
|
valueFn func() (newValue V, cancel bool),
|
||||||
|
) (value V, loaded bool) {
|
||||||
|
return m.doCompute(
|
||||||
|
key,
|
||||||
|
func(V, bool) (V, bool) {
|
||||||
|
nv, c := valueFn()
|
||||||
|
if !c {
|
||||||
|
return nv, false
|
||||||
|
}
|
||||||
|
return nv, true // nv is ignored
|
||||||
|
},
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compute either sets the computed new value for the key or deletes
|
||||||
|
// the value for the key. When the delete result of the valueFn function
|
||||||
|
// is set to true, the value will be deleted, if it exists. When delete
|
||||||
|
// is set to false, the value is updated to the newValue.
|
||||||
|
// The ok result indicates whether value was computed and stored, thus, is
|
||||||
|
// present in the map. The actual result contains the new value in cases where
|
||||||
|
// the value was computed and stored. See the example for a few use cases.
|
||||||
|
//
|
||||||
|
// This call locks a hash table bucket while the compute function
|
||||||
|
// is executed. It means that modifications on other entries in
|
||||||
|
// the bucket will be blocked until the valueFn executes. Consider
|
||||||
|
// this when the function includes long-running operations.
|
||||||
|
func (m *MapOf[K, V]) Compute(
|
||||||
|
key K,
|
||||||
|
valueFn func(oldValue V, loaded bool) (newValue V, delete bool),
|
||||||
|
) (actual V, ok bool) {
|
||||||
|
return m.doCompute(key, valueFn, false, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadAndDelete deletes the value for a key, returning the previous
|
||||||
|
// value if any. The loaded result reports whether the key was
|
||||||
|
// present.
|
||||||
|
func (m *MapOf[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
|
||||||
|
return m.doCompute(
|
||||||
|
key,
|
||||||
|
func(value V, loaded bool) (V, bool) {
|
||||||
|
return value, true
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete deletes the value for a key.
|
||||||
|
func (m *MapOf[K, V]) Delete(key K) {
|
||||||
|
m.doCompute(
|
||||||
|
key,
|
||||||
|
func(value V, loaded bool) (V, bool) {
|
||||||
|
return value, true
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MapOf[K, V]) doCompute(
|
||||||
|
key K,
|
||||||
|
valueFn func(oldValue V, loaded bool) (V, bool),
|
||||||
|
loadIfExists, computeOnly bool,
|
||||||
|
) (V, bool) {
|
||||||
|
// Read-only path.
|
||||||
|
if loadIfExists {
|
||||||
|
if v, ok := m.Load(key); ok {
|
||||||
|
return v, !computeOnly
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Write path.
|
||||||
|
for {
|
||||||
|
compute_attempt:
|
||||||
|
var (
|
||||||
|
emptyb *bucketOfPadded
|
||||||
|
emptyidx int
|
||||||
|
)
|
||||||
|
table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
|
||||||
|
tableLen := len(table.buckets)
|
||||||
|
hash := m.hasher(key, table.seed)
|
||||||
|
h1 := h1(hash)
|
||||||
|
h2 := h2(hash)
|
||||||
|
h2w := broadcast(h2)
|
||||||
|
bidx := uint64(len(table.buckets)-1) & h1
|
||||||
|
rootb := &table.buckets[bidx]
|
||||||
|
rootb.mu.Lock()
|
||||||
|
// The following two checks must go in reverse to what's
|
||||||
|
// in the resize method.
|
||||||
|
if m.resizeInProgress() {
|
||||||
|
// Resize is in progress. Wait, then go for another attempt.
|
||||||
|
rootb.mu.Unlock()
|
||||||
|
m.waitForResize()
|
||||||
|
goto compute_attempt
|
||||||
|
}
|
||||||
|
if m.newerTableExists(table) {
|
||||||
|
// Someone resized the table. Go for another attempt.
|
||||||
|
rootb.mu.Unlock()
|
||||||
|
goto compute_attempt
|
||||||
|
}
|
||||||
|
b := rootb
|
||||||
|
for {
|
||||||
|
metaw := b.meta
|
||||||
|
markedw := markZeroBytes(metaw^h2w) & metaMask
|
||||||
|
for markedw != 0 {
|
||||||
|
idx := firstMarkedByteIndex(markedw)
|
||||||
|
eptr := b.entries[idx]
|
||||||
|
if eptr != nil {
|
||||||
|
e := (*entryOf[K, V])(eptr)
|
||||||
|
if e.key == key {
|
||||||
|
if loadIfExists {
|
||||||
|
rootb.mu.Unlock()
|
||||||
|
return e.value, !computeOnly
|
||||||
|
}
|
||||||
|
// In-place update/delete.
|
||||||
|
// We get a copy of the value via an interface{} on each call,
|
||||||
|
// thus the live value pointers are unique. Otherwise atomic
|
||||||
|
// snapshot won't be correct in case of multiple Store calls
|
||||||
|
// using the same value.
|
||||||
|
oldv := e.value
|
||||||
|
newv, del := valueFn(oldv, true)
|
||||||
|
if del {
|
||||||
|
// Deletion.
|
||||||
|
// First we update the hash, then the entry.
|
||||||
|
newmetaw := setByte(metaw, emptyMetaSlot, idx)
|
||||||
|
atomic.StoreUint64(&b.meta, newmetaw)
|
||||||
|
atomic.StorePointer(&b.entries[idx], nil)
|
||||||
|
rootb.mu.Unlock()
|
||||||
|
table.addSize(bidx, -1)
|
||||||
|
// Might need to shrink the table if we left bucket empty.
|
||||||
|
if newmetaw == defaultMeta {
|
||||||
|
m.resize(table, mapShrinkHint)
|
||||||
|
}
|
||||||
|
return oldv, !computeOnly
|
||||||
|
}
|
||||||
|
newe := new(entryOf[K, V])
|
||||||
|
newe.key = key
|
||||||
|
newe.value = newv
|
||||||
|
atomic.StorePointer(&b.entries[idx], unsafe.Pointer(newe))
|
||||||
|
rootb.mu.Unlock()
|
||||||
|
if computeOnly {
|
||||||
|
// Compute expects the new value to be returned.
|
||||||
|
return newv, true
|
||||||
|
}
|
||||||
|
// LoadAndStore expects the old value to be returned.
|
||||||
|
return oldv, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
markedw &= markedw - 1
|
||||||
|
}
|
||||||
|
if emptyb == nil {
|
||||||
|
// Search for empty entries (up to 5 per bucket).
|
||||||
|
emptyw := metaw & defaultMetaMasked
|
||||||
|
if emptyw != 0 {
|
||||||
|
idx := firstMarkedByteIndex(emptyw)
|
||||||
|
emptyb = b
|
||||||
|
emptyidx = idx
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if b.next == nil {
|
||||||
|
if emptyb != nil {
|
||||||
|
// Insertion into an existing bucket.
|
||||||
|
var zeroV V
|
||||||
|
newValue, del := valueFn(zeroV, false)
|
||||||
|
if del {
|
||||||
|
rootb.mu.Unlock()
|
||||||
|
return zeroV, false
|
||||||
|
}
|
||||||
|
newe := new(entryOf[K, V])
|
||||||
|
newe.key = key
|
||||||
|
newe.value = newValue
|
||||||
|
// First we update meta, then the entry.
|
||||||
|
atomic.StoreUint64(&emptyb.meta, setByte(emptyb.meta, h2, emptyidx))
|
||||||
|
atomic.StorePointer(&emptyb.entries[emptyidx], unsafe.Pointer(newe))
|
||||||
|
rootb.mu.Unlock()
|
||||||
|
table.addSize(bidx, 1)
|
||||||
|
return newValue, computeOnly
|
||||||
|
}
|
||||||
|
growThreshold := float64(tableLen) * entriesPerMapOfBucket * mapLoadFactor
|
||||||
|
if table.sumSize() > int64(growThreshold) {
|
||||||
|
// Need to grow the table. Then go for another attempt.
|
||||||
|
rootb.mu.Unlock()
|
||||||
|
m.resize(table, mapGrowHint)
|
||||||
|
goto compute_attempt
|
||||||
|
}
|
||||||
|
// Insertion into a new bucket.
|
||||||
|
var zeroV V
|
||||||
|
newValue, del := valueFn(zeroV, false)
|
||||||
|
if del {
|
||||||
|
rootb.mu.Unlock()
|
||||||
|
return newValue, false
|
||||||
|
}
|
||||||
|
// Create and append a bucket.
|
||||||
|
newb := new(bucketOfPadded)
|
||||||
|
newb.meta = setByte(defaultMeta, h2, 0)
|
||||||
|
newe := new(entryOf[K, V])
|
||||||
|
newe.key = key
|
||||||
|
newe.value = newValue
|
||||||
|
newb.entries[0] = unsafe.Pointer(newe)
|
||||||
|
atomic.StorePointer(&b.next, unsafe.Pointer(newb))
|
||||||
|
rootb.mu.Unlock()
|
||||||
|
table.addSize(bidx, 1)
|
||||||
|
return newValue, computeOnly
|
||||||
|
}
|
||||||
|
b = (*bucketOfPadded)(b.next)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}

func (m *MapOf[K, V]) newerTableExists(table *mapOfTable[K, V]) bool {
	curTablePtr := atomic.LoadPointer(&m.table)
	return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table))
}

func (m *MapOf[K, V]) resizeInProgress() bool {
	return atomic.LoadInt64(&m.resizing) == 1
}

func (m *MapOf[K, V]) waitForResize() {
	m.resizeMu.Lock()
	for m.resizeInProgress() {
		m.resizeCond.Wait()
	}
	m.resizeMu.Unlock()
}

func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) {
	knownTableLen := len(knownTable.buckets)
	// Fast path for shrink attempts.
	if hint == mapShrinkHint {
		if m.growOnly ||
			m.minTableLen == knownTableLen ||
			knownTable.sumSize() > int64((knownTableLen*entriesPerMapOfBucket)/mapShrinkFraction) {
			return
		}
	}
	// Slow path.
	if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
		// Someone else started resize. Wait for it to finish.
		m.waitForResize()
		return
	}
	var newTable *mapOfTable[K, V]
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	tableLen := len(table.buckets)
	switch hint {
	case mapGrowHint:
		// Grow the table with factor of 2.
		atomic.AddInt64(&m.totalGrowths, 1)
		newTable = newMapOfTable[K, V](tableLen << 1)
	case mapShrinkHint:
		shrinkThreshold := int64((tableLen * entriesPerMapOfBucket) / mapShrinkFraction)
		if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
			// Shrink the table with factor of 2.
			atomic.AddInt64(&m.totalShrinks, 1)
			newTable = newMapOfTable[K, V](tableLen >> 1)
		} else {
			// No need to shrink. Wake up all waiters and give up.
			m.resizeMu.Lock()
			atomic.StoreInt64(&m.resizing, 0)
			m.resizeCond.Broadcast()
			m.resizeMu.Unlock()
			return
		}
	case mapClearHint:
		newTable = newMapOfTable[K, V](m.minTableLen)
	default:
		panic(fmt.Sprintf("unexpected resize hint: %d", hint))
	}
	// Copy the data only if we're not clearing the map.
	if hint != mapClearHint {
		for i := 0; i < tableLen; i++ {
			copied := copyBucketOf(&table.buckets[i], newTable, m.hasher)
			newTable.addSizePlain(uint64(i), copied)
		}
	}
	// Publish the new table and wake up all waiters.
	atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
	m.resizeMu.Lock()
	atomic.StoreInt64(&m.resizing, 0)
	m.resizeCond.Broadcast()
	m.resizeMu.Unlock()
}

func copyBucketOf[K comparable, V any](
	b *bucketOfPadded,
	destTable *mapOfTable[K, V],
	hasher func(K, uint64) uint64,
) (copied int) {
	rootb := b
	rootb.mu.Lock()
	for {
		for i := 0; i < entriesPerMapOfBucket; i++ {
			if b.entries[i] != nil {
				e := (*entryOf[K, V])(b.entries[i])
				hash := hasher(e.key, destTable.seed)
				bidx := uint64(len(destTable.buckets)-1) & h1(hash)
				destb := &destTable.buckets[bidx]
				appendToBucketOf(h2(hash), b.entries[i], destb)
				copied++
			}
		}
		if b.next == nil {
			rootb.mu.Unlock()
			return
		}
		b = (*bucketOfPadded)(b.next)
	}
}

// Range calls f sequentially for each key and value present in the
// map. If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot
// of the Map's contents: no key will be visited more than once, but
// if the value for any key is stored or deleted concurrently, Range
// may reflect any mapping for that key from any point during the
// Range call.
//
// It is safe to modify the map while iterating it, including entry
// creation, modification and deletion. However, the concurrent
// modification rule apply, i.e. the changes may be not reflected
// in the subsequently iterated entries.
func (m *MapOf[K, V]) Range(f func(key K, value V) bool) {
	var zeroPtr unsafe.Pointer
	// Pre-allocate array big enough to fit entries for most hash tables.
	bentries := make([]unsafe.Pointer, 0, 16*entriesPerMapOfBucket)
	tablep := atomic.LoadPointer(&m.table)
	table := *(*mapOfTable[K, V])(tablep)
	for i := range table.buckets {
		rootb := &table.buckets[i]
		b := rootb
		// Prevent concurrent modifications and copy all entries into
		// the intermediate slice.
		rootb.mu.Lock()
		for {
			for i := 0; i < entriesPerMapOfBucket; i++ {
				if b.entries[i] != nil {
					bentries = append(bentries, b.entries[i])
				}
			}
			if b.next == nil {
				rootb.mu.Unlock()
				break
			}
			b = (*bucketOfPadded)(b.next)
		}
		// Call the function for all copied entries.
		for j := range bentries {
			entry := (*entryOf[K, V])(bentries[j])
			if !f(entry.key, entry.value) {
				return
			}
			// Remove the reference to avoid preventing the copied
			// entries from being GCed until this method finishes.
			bentries[j] = zeroPtr
		}
		bentries = bentries[:0]
	}
}

// Clear deletes all keys and values currently stored in the map.
func (m *MapOf[K, V]) Clear() {
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	m.resize(table, mapClearHint)
}

// Size returns current size of the map.
func (m *MapOf[K, V]) Size() int {
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	return int(table.sumSize())
}

func appendToBucketOf(h2 uint8, entryPtr unsafe.Pointer, b *bucketOfPadded) {
	for {
		for i := 0; i < entriesPerMapOfBucket; i++ {
			if b.entries[i] == nil {
				b.meta = setByte(b.meta, h2, i)
				b.entries[i] = entryPtr
				return
			}
		}
		if b.next == nil {
			newb := new(bucketOfPadded)
			newb.meta = setByte(defaultMeta, h2, 0)
			newb.entries[0] = entryPtr
			b.next = unsafe.Pointer(newb)
			return
		}
		b = (*bucketOfPadded)(b.next)
	}
}

func (table *mapOfTable[K, V]) addSize(bucketIdx uint64, delta int) {
	cidx := uint64(len(table.size)-1) & bucketIdx
	atomic.AddInt64(&table.size[cidx].c, int64(delta))
}

func (table *mapOfTable[K, V]) addSizePlain(bucketIdx uint64, delta int) {
	cidx := uint64(len(table.size)-1) & bucketIdx
	table.size[cidx].c += int64(delta)
}

func (table *mapOfTable[K, V]) sumSize() int64 {
	sum := int64(0)
	for i := range table.size {
		sum += atomic.LoadInt64(&table.size[i].c)
	}
	return sum
}

func h1(h uint64) uint64 {
	return h >> 7
}

func h2(h uint64) uint8 {
	return uint8(h & 0x7f)
}

// Stats returns statistics for the MapOf. Just like other map
// methods, this one is thread-safe. Yet it's an O(N) operation,
// so it should be used only for diagnostics or debugging purposes.
func (m *MapOf[K, V]) Stats() MapStats {
	stats := MapStats{
		TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
		TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
		MinEntries:   math.MaxInt32,
	}
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	stats.RootBuckets = len(table.buckets)
	stats.Counter = int(table.sumSize())
	stats.CounterLen = len(table.size)
	for i := range table.buckets {
		nentries := 0
		b := &table.buckets[i]
		stats.TotalBuckets++
		for {
			nentriesLocal := 0
			stats.Capacity += entriesPerMapOfBucket
			for i := 0; i < entriesPerMapOfBucket; i++ {
				if atomic.LoadPointer(&b.entries[i]) != nil {
					stats.Size++
					nentriesLocal++
				}
			}
			nentries += nentriesLocal
			if nentriesLocal == 0 {
				stats.EmptyBuckets++
			}
			if b.next == nil {
				break
			}
			b = (*bucketOfPadded)(atomic.LoadPointer(&b.next))
			stats.TotalBuckets++
		}
		if nentries < stats.MinEntries {
			stats.MinEntries = nentries
		}
		if nentries > stats.MaxEntries {
			stats.MaxEntries = nentries
		}
	}
	return stats
}
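The MapOf listing ends here. As a rough orientation for readers of this diff, the sketch below shows how the exported surface (Store, Load, Range, Size) fits together. It is not part of the vendored file, and it assumes the package is imported as github.com/puzpuzpuz/xsync/v3 and that the NewMapOf constructor appears earlier in the same package.

```go
package main

import (
	"fmt"

	"github.com/puzpuzpuz/xsync/v3"
)

func main() {
	// Assumes the NewMapOf constructor from earlier in this vendored package.
	m := xsync.NewMapOf[string, int]()
	m.Store("a", 1)
	m.Store("b", 2)

	if v, ok := m.Load("a"); ok {
		fmt.Println("a =", v)
	}

	// Range copies each bucket's entries under that bucket's lock and then
	// calls f on the copies, so the callback never blocks concurrent writers.
	m.Range(func(key string, value int) bool {
		fmt.Println(key, value)
		return true // keep iterating
	})

	fmt.Println("size:", m.Size())
}
```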
125 vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueue.go generated vendored Normal file
@@ -0,0 +1,125 @@
package xsync

import (
	"runtime"
	"sync/atomic"
	"unsafe"
)

// A MPMCQueue is a bounded multi-producer multi-consumer concurrent
// queue.
//
// MPMCQueue instances must be created with NewMPMCQueue function.
// A MPMCQueue must not be copied after first use.
//
// Based on the data structure from the following C++ library:
// https://github.com/rigtorp/MPMCQueue
type MPMCQueue struct {
	cap  uint64
	head uint64
	//lint:ignore U1000 prevents false sharing
	hpad [cacheLineSize - 8]byte
	tail uint64
	//lint:ignore U1000 prevents false sharing
	tpad  [cacheLineSize - 8]byte
	slots []slotPadded
}

type slotPadded struct {
	slot
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - unsafe.Sizeof(slot{})]byte
}

type slot struct {
	turn uint64
	item interface{}
}

// NewMPMCQueue creates a new MPMCQueue instance with the given
// capacity.
func NewMPMCQueue(capacity int) *MPMCQueue {
	if capacity < 1 {
		panic("capacity must be positive number")
	}
	return &MPMCQueue{
		cap:   uint64(capacity),
		slots: make([]slotPadded, capacity),
	}
}

// Enqueue inserts the given item into the queue.
// Blocks, if the queue is full.
//
// Deprecated: use TryEnqueue in combination with runtime.Gosched().
func (q *MPMCQueue) Enqueue(item interface{}) {
	head := atomic.AddUint64(&q.head, 1) - 1
	slot := &q.slots[q.idx(head)]
	turn := q.turn(head) * 2
	for atomic.LoadUint64(&slot.turn) != turn {
		runtime.Gosched()
	}
	slot.item = item
	atomic.StoreUint64(&slot.turn, turn+1)
}

// Dequeue retrieves and removes the item from the head of the queue.
// Blocks, if the queue is empty.
//
// Deprecated: use TryDequeue in combination with runtime.Gosched().
func (q *MPMCQueue) Dequeue() interface{} {
	tail := atomic.AddUint64(&q.tail, 1) - 1
	slot := &q.slots[q.idx(tail)]
	turn := q.turn(tail)*2 + 1
	for atomic.LoadUint64(&slot.turn) != turn {
		runtime.Gosched()
	}
	item := slot.item
	slot.item = nil
	atomic.StoreUint64(&slot.turn, turn+1)
	return item
}

// TryEnqueue inserts the given item into the queue. Does not block
// and returns immediately. The result indicates that the queue isn't
// full and the item was inserted.
func (q *MPMCQueue) TryEnqueue(item interface{}) bool {
	head := atomic.LoadUint64(&q.head)
	slot := &q.slots[q.idx(head)]
	turn := q.turn(head) * 2
	if atomic.LoadUint64(&slot.turn) == turn {
		if atomic.CompareAndSwapUint64(&q.head, head, head+1) {
			slot.item = item
			atomic.StoreUint64(&slot.turn, turn+1)
			return true
		}
	}
	return false
}

// TryDequeue retrieves and removes the item from the head of the
// queue. Does not block and returns immediately. The ok result
// indicates that the queue isn't empty and an item was retrieved.
func (q *MPMCQueue) TryDequeue() (item interface{}, ok bool) {
	tail := atomic.LoadUint64(&q.tail)
	slot := &q.slots[q.idx(tail)]
	turn := q.turn(tail)*2 + 1
	if atomic.LoadUint64(&slot.turn) == turn {
		if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) {
			item = slot.item
			ok = true
			slot.item = nil
			atomic.StoreUint64(&slot.turn, turn+1)
			return
		}
	}
	return
}

func (q *MPMCQueue) idx(i uint64) uint64 {
	return i % q.cap
}

func (q *MPMCQueue) turn(i uint64) uint64 {
	return i / q.cap
}
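Since Enqueue and Dequeue are deprecated in favour of the non-blocking calls, a typical caller spins on TryEnqueue/TryDequeue with runtime.Gosched, roughly as in this illustrative sketch (not part of the diff):

```go
package main

import (
	"fmt"
	"runtime"

	"github.com/puzpuzpuz/xsync/v3"
)

func main() {
	q := xsync.NewMPMCQueue(8)

	// Producer side: spin with Gosched instead of the deprecated blocking Enqueue.
	for !q.TryEnqueue("job-1") {
		runtime.Gosched()
	}

	// Consumer side is symmetric.
	for {
		if item, ok := q.TryDequeue(); ok {
			fmt.Println("got", item)
			break
		}
		runtime.Gosched()
	}
}
```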
138 vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueueof.go generated vendored Normal file
@@ -0,0 +1,138 @@
//go:build go1.19
// +build go1.19

package xsync

import (
	"runtime"
	"sync/atomic"
	"unsafe"
)

// A MPMCQueueOf is a bounded multi-producer multi-consumer concurrent
// queue. It's a generic version of MPMCQueue.
//
// MPMCQueueOf instances must be created with NewMPMCQueueOf function.
// A MPMCQueueOf must not be copied after first use.
//
// Based on the data structure from the following C++ library:
// https://github.com/rigtorp/MPMCQueue
type MPMCQueueOf[I any] struct {
	cap  uint64
	head uint64
	//lint:ignore U1000 prevents false sharing
	hpad [cacheLineSize - 8]byte
	tail uint64
	//lint:ignore U1000 prevents false sharing
	tpad  [cacheLineSize - 8]byte
	slots []slotOfPadded[I]
}

type slotOfPadded[I any] struct {
	slotOf[I]
	// Unfortunately, proper padding like the below one:
	//
	// pad [cacheLineSize - (unsafe.Sizeof(slotOf[I]{}) % cacheLineSize)]byte
	//
	// won't compile, so here we add a best-effort padding for items up to
	// 56 bytes size.
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - unsafe.Sizeof(atomic.Uint64{})]byte
}

type slotOf[I any] struct {
	// atomic.Uint64 is used here to get proper 8 byte alignment on
	// 32-bit archs.
	turn atomic.Uint64
	item I
}

// NewMPMCQueueOf creates a new MPMCQueueOf instance with the given
// capacity.
func NewMPMCQueueOf[I any](capacity int) *MPMCQueueOf[I] {
	if capacity < 1 {
		panic("capacity must be positive number")
	}
	return &MPMCQueueOf[I]{
		cap:   uint64(capacity),
		slots: make([]slotOfPadded[I], capacity),
	}
}

// Enqueue inserts the given item into the queue.
// Blocks, if the queue is full.
//
// Deprecated: use TryEnqueue in combination with runtime.Gosched().
func (q *MPMCQueueOf[I]) Enqueue(item I) {
	head := atomic.AddUint64(&q.head, 1) - 1
	slot := &q.slots[q.idx(head)]
	turn := q.turn(head) * 2
	for slot.turn.Load() != turn {
		runtime.Gosched()
	}
	slot.item = item
	slot.turn.Store(turn + 1)
}

// Dequeue retrieves and removes the item from the head of the queue.
// Blocks, if the queue is empty.
//
// Deprecated: use TryDequeue in combination with runtime.Gosched().
func (q *MPMCQueueOf[I]) Dequeue() I {
	var zeroI I
	tail := atomic.AddUint64(&q.tail, 1) - 1
	slot := &q.slots[q.idx(tail)]
	turn := q.turn(tail)*2 + 1
	for slot.turn.Load() != turn {
		runtime.Gosched()
	}
	item := slot.item
	slot.item = zeroI
	slot.turn.Store(turn + 1)
	return item
}

// TryEnqueue inserts the given item into the queue. Does not block
// and returns immediately. The result indicates that the queue isn't
// full and the item was inserted.
func (q *MPMCQueueOf[I]) TryEnqueue(item I) bool {
	head := atomic.LoadUint64(&q.head)
	slot := &q.slots[q.idx(head)]
	turn := q.turn(head) * 2
	if slot.turn.Load() == turn {
		if atomic.CompareAndSwapUint64(&q.head, head, head+1) {
			slot.item = item
			slot.turn.Store(turn + 1)
			return true
		}
	}
	return false
}

// TryDequeue retrieves and removes the item from the head of the
// queue. Does not block and returns immediately. The ok result
// indicates that the queue isn't empty and an item was retrieved.
func (q *MPMCQueueOf[I]) TryDequeue() (item I, ok bool) {
	tail := atomic.LoadUint64(&q.tail)
	slot := &q.slots[q.idx(tail)]
	turn := q.turn(tail)*2 + 1
	if slot.turn.Load() == turn {
		if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) {
			var zeroI I
			item = slot.item
			ok = true
			slot.item = zeroI
			slot.turn.Store(turn + 1)
			return
		}
	}
	return
}

func (q *MPMCQueueOf[I]) idx(i uint64) uint64 {
	return i % q.cap
}

func (q *MPMCQueueOf[I]) turn(i uint64) uint64 {
	return i / q.cap
}
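The generic variant is used the same way; the only difference visible to callers is that items keep their static type instead of being boxed into interface{}. A minimal sketch, not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/puzpuzpuz/xsync/v3"
)

func main() {
	// Items keep their static type; no interface{} boxing or type assertion.
	q := xsync.NewMPMCQueueOf[int](64)
	if q.TryEnqueue(42) {
		if v, ok := q.TryDequeue(); ok {
			fmt.Println(v + 1) // v is an int
		}
	}
}
```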
188 vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go generated vendored Normal file
@@ -0,0 +1,188 @@
package xsync

import (
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

// slow-down guard
const nslowdown = 7

// pool for reader tokens
var rtokenPool sync.Pool

// RToken is a reader lock token.
type RToken struct {
	slot uint32
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 4]byte
}

// A RBMutex is a reader biased reader/writer mutual exclusion lock.
// The lock can be held by an many readers or a single writer.
// The zero value for a RBMutex is an unlocked mutex.
//
// A RBMutex must not be copied after first use.
//
// RBMutex is based on a modified version of BRAVO
// (Biased Locking for Reader-Writer Locks) algorithm:
// https://arxiv.org/pdf/1810.01553.pdf
//
// RBMutex is a specialized mutex for scenarios, such as caches,
// where the vast majority of locks are acquired by readers and write
// lock acquire attempts are infrequent. In such scenarios, RBMutex
// performs better than sync.RWMutex on large multicore machines.
//
// RBMutex extends sync.RWMutex internally and uses it as the "reader
// bias disabled" fallback, so the same semantics apply. The only
// noticeable difference is in reader tokens returned from the
// RLock/RUnlock methods.
type RBMutex struct {
	rslots       []rslot
	rmask        uint32
	rbias        int32
	inhibitUntil time.Time
	rw           sync.RWMutex
}

type rslot struct {
	mu int32
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 4]byte
}

// NewRBMutex creates a new RBMutex instance.
func NewRBMutex() *RBMutex {
	nslots := nextPowOf2(parallelism())
	mu := RBMutex{
		rslots: make([]rslot, nslots),
		rmask:  nslots - 1,
		rbias:  1,
	}
	return &mu
}

// TryRLock tries to lock m for reading without blocking.
// When TryRLock succeeds, it returns true and a reader token.
// In case of a failure, a false is returned.
func (mu *RBMutex) TryRLock() (bool, *RToken) {
	if t := mu.fastRlock(); t != nil {
		return true, t
	}
	// Optimistic slow path.
	if mu.rw.TryRLock() {
		if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) {
			atomic.StoreInt32(&mu.rbias, 1)
		}
		return true, nil
	}
	return false, nil
}

// RLock locks m for reading and returns a reader token. The
// token must be used in the later RUnlock call.
//
// Should not be used for recursive read locking; a blocked Lock
// call excludes new readers from acquiring the lock.
func (mu *RBMutex) RLock() *RToken {
	if t := mu.fastRlock(); t != nil {
		return t
	}
	// Slow path.
	mu.rw.RLock()
	if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) {
		atomic.StoreInt32(&mu.rbias, 1)
	}
	return nil
}

func (mu *RBMutex) fastRlock() *RToken {
	if atomic.LoadInt32(&mu.rbias) == 1 {
		t, ok := rtokenPool.Get().(*RToken)
		if !ok {
			t = new(RToken)
			t.slot = runtime_fastrand()
		}
		// Try all available slots to distribute reader threads to slots.
		for i := 0; i < len(mu.rslots); i++ {
			slot := t.slot + uint32(i)
			rslot := &mu.rslots[slot&mu.rmask]
			rslotmu := atomic.LoadInt32(&rslot.mu)
			if atomic.CompareAndSwapInt32(&rslot.mu, rslotmu, rslotmu+1) {
				if atomic.LoadInt32(&mu.rbias) == 1 {
					// Hot path succeeded.
					t.slot = slot
					return t
				}
				// The mutex is no longer reader biased. Roll back.
				atomic.AddInt32(&rslot.mu, -1)
				rtokenPool.Put(t)
				return nil
			}
			// Contention detected. Give a try with the next slot.
		}
	}
	return nil
}

// RUnlock undoes a single RLock call. A reader token obtained from
// the RLock call must be provided. RUnlock does not affect other
// simultaneous readers. A panic is raised if m is not locked for
// reading on entry to RUnlock.
func (mu *RBMutex) RUnlock(t *RToken) {
	if t == nil {
		mu.rw.RUnlock()
		return
	}
	if atomic.AddInt32(&mu.rslots[t.slot&mu.rmask].mu, -1) < 0 {
		panic("invalid reader state detected")
	}
	rtokenPool.Put(t)
}

// TryLock tries to lock m for writing without blocking.
func (mu *RBMutex) TryLock() bool {
	if mu.rw.TryLock() {
		if atomic.LoadInt32(&mu.rbias) == 1 {
			atomic.StoreInt32(&mu.rbias, 0)
			for i := 0; i < len(mu.rslots); i++ {
				if atomic.LoadInt32(&mu.rslots[i].mu) > 0 {
					// There is a reader. Roll back.
					atomic.StoreInt32(&mu.rbias, 1)
					mu.rw.Unlock()
					return false
				}
			}
		}
		return true
	}
	return false
}

// Lock locks m for writing. If the lock is already locked for
// reading or writing, Lock blocks until the lock is available.
func (mu *RBMutex) Lock() {
	mu.rw.Lock()
	if atomic.LoadInt32(&mu.rbias) == 1 {
		atomic.StoreInt32(&mu.rbias, 0)
		start := time.Now()
		for i := 0; i < len(mu.rslots); i++ {
			for atomic.LoadInt32(&mu.rslots[i].mu) > 0 {
				runtime.Gosched()
			}
		}
		mu.inhibitUntil = time.Now().Add(time.Since(start) * nslowdown)
	}
}

// Unlock unlocks m for writing. A panic is raised if m is not locked
// for writing on entry to Unlock.
//
// As with RWMutex, a locked RBMutex is not associated with a
// particular goroutine. One goroutine may RLock (Lock) a RBMutex and
// then arrange for another goroutine to RUnlock (Unlock) it.
func (mu *RBMutex) Unlock() {
	mu.rw.Unlock()
}
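The part of this API most likely to trip up new users is the reader token: RLock hands back a *RToken that must be passed to the matching RUnlock. A minimal sketch of that calling pattern (not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/puzpuzpuz/xsync/v3"
)

func main() {
	mu := xsync.NewRBMutex()
	hits := 0

	// Writers use Lock/Unlock exactly like sync.RWMutex.
	mu.Lock()
	hits++
	mu.Unlock()

	// Readers keep the token returned by RLock and hand it back to RUnlock;
	// it records which reader slot was taken on the biased fast path.
	t := mu.RLock()
	fmt.Println("hits:", hits)
	mu.RUnlock(t)
}
```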
92 vendor/github.com/puzpuzpuz/xsync/v3/spscqueue.go generated vendored Normal file
@@ -0,0 +1,92 @@
package xsync

import (
	"sync/atomic"
)

// A SPSCQueue is a bounded single-producer single-consumer concurrent
// queue. This means that not more than a single goroutine must be
// publishing items to the queue while not more than a single goroutine
// must be consuming those items.
//
// SPSCQueue instances must be created with NewSPSCQueue function.
// A SPSCQueue must not be copied after first use.
//
// Based on the data structure from the following article:
// https://rigtorp.se/ringbuffer/
type SPSCQueue struct {
	cap  uint64
	pidx uint64
	//lint:ignore U1000 prevents false sharing
	pad0       [cacheLineSize - 8]byte
	pcachedIdx uint64
	//lint:ignore U1000 prevents false sharing
	pad1 [cacheLineSize - 8]byte
	cidx uint64
	//lint:ignore U1000 prevents false sharing
	pad2       [cacheLineSize - 8]byte
	ccachedIdx uint64
	//lint:ignore U1000 prevents false sharing
	pad3  [cacheLineSize - 8]byte
	items []interface{}
}

// NewSPSCQueue creates a new SPSCQueue instance with the given
// capacity.
func NewSPSCQueue(capacity int) *SPSCQueue {
	if capacity < 1 {
		panic("capacity must be positive number")
	}
	return &SPSCQueue{
		cap:   uint64(capacity + 1),
		items: make([]interface{}, capacity+1),
	}
}

// TryEnqueue inserts the given item into the queue. Does not block
// and returns immediately. The result indicates that the queue isn't
// full and the item was inserted.
func (q *SPSCQueue) TryEnqueue(item interface{}) bool {
	// relaxed memory order would be enough here
	idx := atomic.LoadUint64(&q.pidx)
	nextIdx := idx + 1
	if nextIdx == q.cap {
		nextIdx = 0
	}
	cachedIdx := q.ccachedIdx
	if nextIdx == cachedIdx {
		cachedIdx = atomic.LoadUint64(&q.cidx)
		q.ccachedIdx = cachedIdx
		if nextIdx == cachedIdx {
			return false
		}
	}
	q.items[idx] = item
	atomic.StoreUint64(&q.pidx, nextIdx)
	return true
}

// TryDequeue retrieves and removes the item from the head of the
// queue. Does not block and returns immediately. The ok result
// indicates that the queue isn't empty and an item was retrieved.
func (q *SPSCQueue) TryDequeue() (item interface{}, ok bool) {
	// relaxed memory order would be enough here
	idx := atomic.LoadUint64(&q.cidx)
	cachedIdx := q.pcachedIdx
	if idx == cachedIdx {
		cachedIdx = atomic.LoadUint64(&q.pidx)
		q.pcachedIdx = cachedIdx
		if idx == cachedIdx {
			return
		}
	}
	item = q.items[idx]
	q.items[idx] = nil
	ok = true
	nextIdx := idx + 1
	if nextIdx == q.cap {
		nextIdx = 0
	}
	atomic.StoreUint64(&q.cidx, nextIdx)
	return
}
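The single-producer/single-consumer contract means exactly one goroutine may call TryEnqueue and exactly one may call TryDequeue. A minimal sketch of that pairing (not part of the diff):

```go
package main

import (
	"fmt"
	"runtime"

	"github.com/puzpuzpuz/xsync/v3"
)

func main() {
	q := xsync.NewSPSCQueue(128)

	// Exactly one producer goroutine...
	go func() {
		for i := 0; i < 3; i++ {
			for !q.TryEnqueue(i) {
				runtime.Gosched()
			}
		}
	}()

	// ...and exactly one consumer goroutine (here: main).
	for received := 0; received < 3; {
		if v, ok := q.TryDequeue(); ok {
			fmt.Println(v)
			received++
		} else {
			runtime.Gosched()
		}
	}
}
```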
96 vendor/github.com/puzpuzpuz/xsync/v3/spscqueueof.go generated vendored Normal file
@@ -0,0 +1,96 @@
//go:build go1.19
// +build go1.19

package xsync

import (
	"sync/atomic"
)

// A SPSCQueueOf is a bounded single-producer single-consumer concurrent
// queue. This means that not more than a single goroutine must be
// publishing items to the queue while not more than a single goroutine
// must be consuming those items.
//
// SPSCQueueOf instances must be created with NewSPSCQueueOf function.
// A SPSCQueueOf must not be copied after first use.
//
// Based on the data structure from the following article:
// https://rigtorp.se/ringbuffer/
type SPSCQueueOf[I any] struct {
	cap  uint64
	pidx uint64
	//lint:ignore U1000 prevents false sharing
	pad0       [cacheLineSize - 8]byte
	pcachedIdx uint64
	//lint:ignore U1000 prevents false sharing
	pad1 [cacheLineSize - 8]byte
	cidx uint64
	//lint:ignore U1000 prevents false sharing
	pad2       [cacheLineSize - 8]byte
	ccachedIdx uint64
	//lint:ignore U1000 prevents false sharing
	pad3  [cacheLineSize - 8]byte
	items []I
}

// NewSPSCQueueOf creates a new SPSCQueueOf instance with the given
// capacity.
func NewSPSCQueueOf[I any](capacity int) *SPSCQueueOf[I] {
	if capacity < 1 {
		panic("capacity must be positive number")
	}
	return &SPSCQueueOf[I]{
		cap:   uint64(capacity + 1),
		items: make([]I, capacity+1),
	}
}

// TryEnqueue inserts the given item into the queue. Does not block
// and returns immediately. The result indicates that the queue isn't
// full and the item was inserted.
func (q *SPSCQueueOf[I]) TryEnqueue(item I) bool {
	// relaxed memory order would be enough here
	idx := atomic.LoadUint64(&q.pidx)
	next_idx := idx + 1
	if next_idx == q.cap {
		next_idx = 0
	}
	cached_idx := q.ccachedIdx
	if next_idx == cached_idx {
		cached_idx = atomic.LoadUint64(&q.cidx)
		q.ccachedIdx = cached_idx
		if next_idx == cached_idx {
			return false
		}
	}
	q.items[idx] = item
	atomic.StoreUint64(&q.pidx, next_idx)
	return true
}

// TryDequeue retrieves and removes the item from the head of the
// queue. Does not block and returns immediately. The ok result
// indicates that the queue isn't empty and an item was retrieved.
func (q *SPSCQueueOf[I]) TryDequeue() (item I, ok bool) {
	// relaxed memory order would be enough here
	idx := atomic.LoadUint64(&q.cidx)
	cached_idx := q.pcachedIdx
	if idx == cached_idx {
		cached_idx = atomic.LoadUint64(&q.pidx)
		q.pcachedIdx = cached_idx
		if idx == cached_idx {
			return
		}
	}
	var zeroI I
	item = q.items[idx]
	q.items[idx] = zeroI
	ok = true
	next_idx := idx + 1
	if next_idx == q.cap {
		next_idx = 0
	}
	atomic.StoreUint64(&q.cidx, next_idx)
	return
}
66 vendor/github.com/puzpuzpuz/xsync/v3/util.go generated vendored Normal file
@@ -0,0 +1,66 @@
package xsync

import (
	"math/bits"
	"runtime"
	_ "unsafe"
)

// test-only assert()-like flag
var assertionsEnabled = false

const (
	// cacheLineSize is used in paddings to prevent false sharing;
	// 64B are used instead of 128B as a compromise between
	// memory footprint and performance; 128B usage may give ~30%
	// improvement on NUMA machines.
	cacheLineSize = 64
)

// nextPowOf2 computes the next highest power of 2 of 32-bit v.
// Source: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
func nextPowOf2(v uint32) uint32 {
	if v == 0 {
		return 1
	}
	v--
	v |= v >> 1
	v |= v >> 2
	v |= v >> 4
	v |= v >> 8
	v |= v >> 16
	v++
	return v
}

func parallelism() uint32 {
	maxProcs := uint32(runtime.GOMAXPROCS(0))
	numCores := uint32(runtime.NumCPU())
	if maxProcs < numCores {
		return maxProcs
	}
	return numCores
}

//go:noescape
//go:linkname runtime_fastrand runtime.fastrand
func runtime_fastrand() uint32

func broadcast(b uint8) uint64 {
	return 0x101010101010101 * uint64(b)
}

func firstMarkedByteIndex(w uint64) int {
	return bits.TrailingZeros64(w) >> 3
}

// SWAR byte search: may produce false positives, e.g. for 0x0100,
// so make sure to double-check bytes found by this function.
func markZeroBytes(w uint64) uint64 {
	return ((w - 0x0101010101010101) & (^w) & 0x8080808080808080)
}

func setByte(w uint64, b uint8, idx int) uint64 {
	shift := idx << 3
	return (w &^ (0xff << shift)) | (uint64(b) << shift)
}
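The SWAR helpers above (broadcast, markZeroBytes, firstMarkedByteIndex) are unexported, so the standalone sketch below copies their formulas to show how a packed meta word is scanned for a matching byte, including the double-check that the comment asks for. It is illustrative only and not part of the diff.

```go
package main

import (
	"fmt"
	"math/bits"
)

// Copies of the unexported helpers above, reproduced here for illustration.
func broadcast(b uint8) uint64 {
	return 0x101010101010101 * uint64(b)
}

func markZeroBytes(w uint64) uint64 {
	return (w - 0x0101010101010101) & (^w) & 0x8080808080808080
}

func firstMarkedByteIndex(w uint64) int {
	return bits.TrailingZeros64(w) >> 3
}

func main() {
	// Eight meta bytes packed into one word; search for the byte 0x5c.
	meta := uint64(0x1122335c55667788)
	marked := markZeroBytes(meta ^ broadcast(0x5c))
	for marked != 0 {
		idx := firstMarkedByteIndex(marked)
		// Double-check the byte: the SWAR mark may be a false positive.
		if uint8(meta>>(uint(idx)*8)) == 0x5c {
			fmt.Println("match at byte index", idx)
		}
		marked &= marked - 1 // clear the lowest marked byte
	}
}
```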
77 vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go generated vendored Normal file
@@ -0,0 +1,77 @@
package xsync

import (
	"reflect"
	"unsafe"
)

// makeSeed creates a random seed.
func makeSeed() uint64 {
	var s1 uint32
	for {
		s1 = runtime_fastrand()
		// We use seed 0 to indicate an uninitialized seed/hash,
		// so keep trying until we get a non-zero seed.
		if s1 != 0 {
			break
		}
	}
	s2 := runtime_fastrand()
	return uint64(s1)<<32 | uint64(s2)
}

// hashString calculates a hash of s with the given seed.
func hashString(s string, seed uint64) uint64 {
	if s == "" {
		return seed
	}
	strh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	return uint64(runtime_memhash(unsafe.Pointer(strh.Data), uintptr(seed), uintptr(strh.Len)))
}

//go:noescape
//go:linkname runtime_memhash runtime.memhash
func runtime_memhash(p unsafe.Pointer, h, s uintptr) uintptr

// defaultHasher creates a fast hash function for the given comparable type.
// The only limitation is that the type should not contain interfaces inside
// based on runtime.typehash.
func defaultHasher[T comparable]() func(T, uint64) uint64 {
	var zero T

	if reflect.TypeOf(&zero).Elem().Kind() == reflect.Interface {
		return func(value T, seed uint64) uint64 {
			iValue := any(value)
			i := (*iface)(unsafe.Pointer(&iValue))
			return runtime_typehash64(i.typ, i.word, seed)
		}
	} else {
		var iZero any = zero
		i := (*iface)(unsafe.Pointer(&iZero))
		return func(value T, seed uint64) uint64 {
			return runtime_typehash64(i.typ, unsafe.Pointer(&value), seed)
		}
	}
}

// how interface is represented in memory
type iface struct {
	typ  uintptr
	word unsafe.Pointer
}

// same as runtime_typehash, but always returns a uint64
// see: maphash.rthash function for details
func runtime_typehash64(t uintptr, p unsafe.Pointer, seed uint64) uint64 {
	if unsafe.Sizeof(uintptr(0)) == 8 {
		return uint64(runtime_typehash(t, p, uintptr(seed)))
	}

	lo := runtime_typehash(t, p, uintptr(seed))
	hi := runtime_typehash(t, p, uintptr(seed>>32))
	return uint64(hi)<<32 | uint64(lo)
}

//go:noescape
//go:linkname runtime_typehash runtime.typehash
func runtime_typehash(t uintptr, p unsafe.Pointer, h uintptr) uintptr
11 vendor/github.com/tmthrgd/go-hex/.travis.yml generated vendored Normal file
@@ -0,0 +1,11 @@
language: go
go:
  - 1.10.x
  - 1.11.x
  - 1.12.x
  - 1.13.x
  - tip
matrix:
  fast_finish: true
  allow_failures:
    - go: tip
82 vendor/github.com/tmthrgd/go-hex/LICENSE generated vendored Normal file
@@ -0,0 +1,82 @@
Copyright (c) 2016, Tom Thorogood.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the following disclaimer in the
  documentation and/or other materials provided with the distribution.
* Neither the name of the Tom Thorogood nor the
  names of its contributors may be used to endorse or promote products
  derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

---- Portions of the source code are also covered by the following license: ----

Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

---- Portions of the source code are also covered by the following license: ----

Copyright (c) 2005-2016, Wojciech Muła
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
108 vendor/github.com/tmthrgd/go-hex/README.md generated vendored Normal file
@@ -0,0 +1,108 @@
# go-hex

[GoDoc](https://godoc.org/github.com/tmthrgd/go-hex)
[Build Status](https://travis-ci.org/tmthrgd/go-hex)

An efficient hexadecimal implementation for Golang.

go-hex provides hex encoding and decoding using SSE/AVX instructions on x86-64.

## Download

```
go get github.com/tmthrgd/go-hex
```

## Benchmark

go-hex:
```
BenchmarkEncode/15-8     100000000        17.4 ns/op    863.43 MB/s
BenchmarkEncode/32-8     100000000        11.9 ns/op   2690.43 MB/s
BenchmarkEncode/128-8    100000000        21.4 ns/op   5982.92 MB/s
BenchmarkEncode/1k-8      20000000        88.5 ns/op  11572.80 MB/s
BenchmarkEncode/16k-8      1000000        1254 ns/op  13058.10 MB/s
BenchmarkEncode/128k-8      100000       12965 ns/op  10109.53 MB/s
BenchmarkEncode/1M-8         10000      119465 ns/op   8777.23 MB/s
BenchmarkEncode/16M-8          500     3530380 ns/op   4752.24 MB/s
BenchmarkEncode/128M-8          50    28001913 ns/op   4793.16 MB/s
BenchmarkDecode/14-8     100000000        12.6 ns/op   1110.01 MB/s
BenchmarkDecode/32-8     100000000        12.5 ns/op   2558.10 MB/s
BenchmarkDecode/128-8     50000000        27.2 ns/op   4697.66 MB/s
BenchmarkDecode/1k-8      10000000         168 ns/op   6093.43 MB/s
BenchmarkDecode/16k-8       500000        2543 ns/op   6442.09 MB/s
BenchmarkDecode/128k-8      100000       20339 ns/op   6444.24 MB/s
BenchmarkDecode/1M-8         10000      164313 ns/op   6381.57 MB/s
BenchmarkDecode/16M-8          500     3099822 ns/op   5412.31 MB/s
BenchmarkDecode/128M-8          50    24865822 ns/op   5397.68 MB/s
```

[encoding/hex](https://golang.org/pkg/encoding/hex/):
```
BenchmarkRefEncode/15-8    50000000        36.1 ns/op    415.07 MB/s
BenchmarkRefEncode/32-8    20000000        72.9 ns/op    439.14 MB/s
BenchmarkRefEncode/128-8    5000000         289 ns/op    441.54 MB/s
BenchmarkRefEncode/1k-8     1000000        2268 ns/op    451.49 MB/s
BenchmarkRefEncode/16k-8      30000       39110 ns/op    418.91 MB/s
BenchmarkRefEncode/128k-8      5000      291260 ns/op    450.02 MB/s
BenchmarkRefEncode/1M-8        1000     2277578 ns/op    460.39 MB/s
BenchmarkRefEncode/16M-8         30    37087543 ns/op    452.37 MB/s
BenchmarkRefEncode/128M-8         5   293611713 ns/op    457.13 MB/s
BenchmarkRefDecode/14-8    30000000        53.7 ns/op    260.49 MB/s
BenchmarkRefDecode/32-8    10000000         128 ns/op    248.44 MB/s
BenchmarkRefDecode/128-8    3000000         481 ns/op    265.95 MB/s
BenchmarkRefDecode/1k-8      300000        4172 ns/op    245.43 MB/s
BenchmarkRefDecode/16k-8      10000      111989 ns/op    146.30 MB/s
BenchmarkRefDecode/128k-8      2000      909077 ns/op    144.18 MB/s
BenchmarkRefDecode/1M-8         200     7275779 ns/op    144.12 MB/s
BenchmarkRefDecode/16M-8         10   116574839 ns/op    143.92 MB/s
BenchmarkRefDecode/128M-8         2   933871637 ns/op    143.72 MB/s
```

[encoding/hex](https://golang.org/pkg/encoding/hex/) -> go-hex:
```
benchmark                  old ns/op     new ns/op     delta
BenchmarkEncode/15-8       36.1          17.4          -51.80%
BenchmarkEncode/32-8       72.9          11.9          -83.68%
BenchmarkEncode/128-8      289           21.4          -92.60%
BenchmarkEncode/1k-8       2268          88.5          -96.10%
BenchmarkEncode/16k-8      39110         1254          -96.79%
BenchmarkEncode/128k-8     291260        12965         -95.55%
BenchmarkEncode/1M-8       2277578       119465        -94.75%
BenchmarkEncode/16M-8      37087543      3530380       -90.48%
BenchmarkEncode/128M-8     293611713     28001913      -90.46%
BenchmarkDecode/14-8       53.7          12.6          -76.54%
BenchmarkDecode/32-8       128           12.5          -90.23%
BenchmarkDecode/128-8      481           27.2          -94.35%
BenchmarkDecode/1k-8       4172          168           -95.97%
BenchmarkDecode/16k-8      111989        2543          -97.73%
BenchmarkDecode/128k-8     909077        20339         -97.76%
BenchmarkDecode/1M-8       7275779       164313        -97.74%
BenchmarkDecode/16M-8      116574839     3099822       -97.34%
BenchmarkDecode/128M-8     933871637     24865822      -97.34%

benchmark                  old MB/s     new MB/s     speedup
BenchmarkEncode/15-8       415.07       863.43       2.08x
BenchmarkEncode/32-8       439.14       2690.43      6.13x
BenchmarkEncode/128-8      441.54       5982.92      13.55x
BenchmarkEncode/1k-8       451.49       11572.80     25.63x
BenchmarkEncode/16k-8      418.91       13058.10     31.17x
BenchmarkEncode/128k-8     450.02       10109.53     22.46x
BenchmarkEncode/1M-8       460.39       8777.23      19.06x
BenchmarkEncode/16M-8      452.37       4752.24      10.51x
BenchmarkEncode/128M-8     457.13       4793.16      10.49x
BenchmarkDecode/14-8       260.49       1110.01      4.26x
BenchmarkDecode/32-8       248.44       2558.10      10.30x
BenchmarkDecode/128-8      265.95       4697.66      17.66x
BenchmarkDecode/1k-8       245.43       6093.43      24.83x
BenchmarkDecode/16k-8      146.30       6442.09      44.03x
BenchmarkDecode/128k-8     144.18       6444.24      44.70x
BenchmarkDecode/1M-8       144.12       6381.57      44.28x
BenchmarkDecode/16M-8      143.92       5412.31      37.61x
BenchmarkDecode/128M-8     143.72       5397.68      37.56x
```

## License

Unless otherwise noted, the go-hex source files are distributed under the Modified BSD License
found in the LICENSE file.
137 vendor/github.com/tmthrgd/go-hex/hex.go generated vendored Normal file
@@ -0,0 +1,137 @@
// Copyright 2016 Tom Thorogood. All rights reserved.
// Use of this source code is governed by a
// Modified BSD License license that can be found in
// the LICENSE file.
//
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package hex is an efficient hexadecimal implementation for Golang.
package hex

import (
	"errors"
	"fmt"
)

var errLength = errors.New("go-hex: odd length hex string")

var (
	lower = []byte("0123456789abcdef")
	upper = []byte("0123456789ABCDEF")
)

// InvalidByteError values describe errors resulting from an invalid byte in a hex string.
type InvalidByteError byte

func (e InvalidByteError) Error() string {
	return fmt.Sprintf("go-hex: invalid byte: %#U", rune(e))
}

// EncodedLen returns the length of an encoding of n source bytes.
func EncodedLen(n int) int {
	return n * 2
}

// DecodedLen returns the length of a decoding of n source bytes.
func DecodedLen(n int) int {
	return n / 2
}

// Encode encodes src into EncodedLen(len(src))
// bytes of dst. As a convenience, it returns the number
// of bytes written to dst, but this value is always EncodedLen(len(src)).
// Encode implements lowercase hexadecimal encoding.
func Encode(dst, src []byte) int {
	return RawEncode(dst, src, lower)
}

// EncodeUpper encodes src into EncodedLen(len(src))
// bytes of dst. As a convenience, it returns the number
// of bytes written to dst, but this value is always EncodedLen(len(src)).
// EncodeUpper implements uppercase hexadecimal encoding.
func EncodeUpper(dst, src []byte) int {
	return RawEncode(dst, src, upper)
}

// EncodeToString returns the lowercase hexadecimal encoding of src.
func EncodeToString(src []byte) string {
	return RawEncodeToString(src, lower)
}

// EncodeUpperToString returns the uppercase hexadecimal encoding of src.
func EncodeUpperToString(src []byte) string {
	return RawEncodeToString(src, upper)
}

// RawEncodeToString returns the hexadecimal encoding of src for a given
// alphabet.
func RawEncodeToString(src, alpha []byte) string {
	dst := make([]byte, EncodedLen(len(src)))
	RawEncode(dst, src, alpha)
	return string(dst)
}

// DecodeString returns the bytes represented by the hexadecimal string s.
func DecodeString(s string) ([]byte, error) {
	src := []byte(s)
	dst := make([]byte, DecodedLen(len(src)))

	if _, err := Decode(dst, src); err != nil {
		return nil, err
	}

	return dst, nil
}

// MustDecodeString is like DecodeString but panics if the string cannot be
// parsed. It simplifies safe initialization of global variables holding
// binary data.
func MustDecodeString(str string) []byte {
	dst, err := DecodeString(str)
	if err != nil {
		panic(err)
	}

	return dst
}

func encodeGeneric(dst, src, alpha []byte) {
	for i, v := range src {
		dst[i*2] = alpha[v>>4]
		dst[i*2+1] = alpha[v&0x0f]
	}
}

func decodeGeneric(dst, src []byte) (uint64, bool) {
	for i := 0; i < len(src)/2; i++ {
		a, ok := fromHexChar(src[i*2])
		if !ok {
			return uint64(i * 2), false
		}

		b, ok := fromHexChar(src[i*2+1])
		if !ok {
			return uint64(i*2 + 1), false
		}

		dst[i] = (a << 4) | b
	}

	return 0, true
}

// fromHexChar converts a hex character into its value and a success flag.
func fromHexChar(c byte) (byte, bool) {
	switch {
	case '0' <= c && c <= '9':
		return c - '0', true
	case 'a' <= c && c <= 'f':
		return c - 'a' + 10, true
	case 'A' <= c && c <= 'F':
		return c - 'A' + 10, true
	}

	return 0, false
}
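For reference, the exported go-hex API mirrors encoding/hex while adding uppercase helpers; a minimal usage sketch (not part of the diff):

```go
package main

import (
	"fmt"

	hex "github.com/tmthrgd/go-hex"
)

func main() {
	raw := []byte{0xde, 0xad, 0xbe, 0xef}

	// Same call shapes as encoding/hex, plus uppercase variants.
	fmt.Println(hex.EncodeToString(raw))      // deadbeef
	fmt.Println(hex.EncodeUpperToString(raw)) // DEADBEEF

	decoded, err := hex.DecodeString("deadbeef")
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded) // [222 173 190 239]
}
```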
Some files were not shown because too many files have changed in this diff.