Compare commits
28 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 4ca1810d07 | |||
| c0880cb076 | |||
| 988798998d | |||
| 535a91d4be | |||
| bd54e85727 | |||
| b042b2d508 | |||
| af1733dc9a | |||
| 389fff2b44 | |||
| f331ba2b61 | |||
| f4b8fc5382 | |||
| dc9172cc7c | |||
| ee88c07989 | |||
| ff1180524a | |||
|
|
480038d51d | ||
| 77436757c8 | |||
| 5e6f03e412 | |||
| 1dcbc79387 | |||
| 59c4a5ebf8 | |||
| 091e1913ee | |||
| 0e6e94797c | |||
| a033349c76 | |||
| 466d657ea7 | |||
| 47bf748fd5 | |||
| 88589e00e7 | |||
| 4cdccde9cf | |||
| aba22cb574 | |||
| d0630b4899 | |||
| c9eed9b794 |
@@ -1,5 +0,0 @@
|
||||
---
|
||||
description: Build the RelSpec binary
|
||||
---
|
||||
|
||||
Build the RelSpec project by running `make build`. Report the build status and any errors encountered.
|
||||
@@ -1,9 +0,0 @@
|
||||
---
|
||||
description: Generate test coverage report
|
||||
---
|
||||
|
||||
Generate and display test coverage for RelSpec:
|
||||
1. Run `go test -cover ./...` to get coverage percentage
|
||||
2. If detailed coverage is needed, run `go test -coverprofile=coverage.out ./...` and then `go tool cover -html=coverage.out` to generate HTML report
|
||||
|
||||
Show coverage statistics and identify areas needing more tests.
|
||||
@@ -1,10 +0,0 @@
|
||||
---
|
||||
description: Run Go linters on the codebase
|
||||
---
|
||||
|
||||
Run linting tools on the RelSpec codebase:
|
||||
1. First run `gofmt -l .` to check formatting
|
||||
2. If golangci-lint is available, run `golangci-lint run ./...`
|
||||
3. Run `go vet ./...` to check for suspicious constructs
|
||||
|
||||
Report any issues found and suggest fixes if needed.
|
||||
@@ -1,5 +0,0 @@
|
||||
---
|
||||
description: Run all tests for the RelSpec project
|
||||
---
|
||||
|
||||
Run `go test ./...` to execute all unit tests in the project. Show a summary of the results and highlight any failures.
|
||||
327
.gitea/workflows/release.yml
Normal file
327
.gitea/workflows/release.yml
Normal file
@@ -0,0 +1,327 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: 'Tag to release (e.g. v1.2.3)'
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Test
|
||||
run: go test ./...
|
||||
|
||||
- name: Lint
|
||||
run: go vet ./...
|
||||
|
||||
release:
|
||||
needs: test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Build release binaries
|
||||
run: |
|
||||
VERSION="${{ github.event.inputs.tag || github.ref_name }}"
|
||||
for target in "linux/amd64" "linux/arm64" "darwin/amd64" "darwin/arm64" "windows/amd64"; do
|
||||
GOOS="${target%/*}"
|
||||
GOARCH="${target#*/}"
|
||||
EXT=""
|
||||
[ "$GOOS" = "windows" ] && EXT=".exe"
|
||||
NAME="relspec-${GOOS}-${GOARCH}${EXT}"
|
||||
GOOS="$GOOS" GOARCH="$GOARCH" go build \
|
||||
-trimpath \
|
||||
-ldflags "-X git.warky.dev/wdevs/relspecgo/cmd/relspec.version=${VERSION}" \
|
||||
-o "$NAME" ./cmd/relspec
|
||||
echo "Built $NAME"
|
||||
done
|
||||
|
||||
- name: Create release and upload assets
|
||||
run: |
|
||||
TAG="${{ github.event.inputs.tag || github.ref_name }}"
|
||||
API="${GITHUB_API_URL}/repos/${GITHUB_REPOSITORY}/releases"
|
||||
|
||||
# Collect commits since the previous tag (or last 20 if no prior tag)
|
||||
PREV_TAG=$(git tag --sort=-version:refname | grep -v "^${TAG}$" | head -1)
|
||||
if [ -n "$PREV_TAG" ]; then
|
||||
RANGE="${PREV_TAG}..${TAG}"
|
||||
else
|
||||
RANGE="HEAD~20..HEAD"
|
||||
fi
|
||||
NOTES=$(git log "$RANGE" --pretty=format:"- %s" --no-merges)
|
||||
BODY="## What's changed"$'\n'"${NOTES}"
|
||||
|
||||
# Escape for JSON
|
||||
BODY_JSON=$(printf '%s' "$BODY" | python3 -c 'import json,sys; print(json.dumps(sys.stdin.read()))')
|
||||
|
||||
RELEASE=$(curl -s -X POST "$API" \
|
||||
-H "Authorization: token ${GITHUB_TOKEN}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"tag_name\":\"${TAG}\",\"name\":\"${TAG}\",\"body\":${BODY_JSON}}")
|
||||
|
||||
UPLOAD_URL=$(echo "$RELEASE" | grep -o '"upload_url":"[^"]*"' | cut -d'"' -f4 | sed 's/{[^}]*}//')
|
||||
if [ -z "$UPLOAD_URL" ]; then
|
||||
echo "Failed to create release: $RELEASE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for f in relspec-*; do
|
||||
echo "Uploading $f..."
|
||||
curl -s -X POST "${UPLOAD_URL}?name=${f}" \
|
||||
-H "Authorization: token ${GITHUB_TOKEN}" \
|
||||
-H "Content-Type: application/octet-stream" \
|
||||
--data-binary "@${f}" > /dev/null
|
||||
done
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
pkg-aur:
|
||||
needs: release
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Publish to AUR
|
||||
env:
|
||||
AUR_SSH_KEY: ${{ secrets.AUR_SSH_KEY }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
VERSION="${{ github.event.inputs.tag || github.ref_name }}"
|
||||
PKGVER="${VERSION#v}"
|
||||
AUR_KEY_PATH="$HOME/.ssh/aur"
|
||||
AUR_KNOWN_HOSTS="$HOME/.ssh/known_hosts"
|
||||
|
||||
# Setup SSH for AUR
|
||||
mkdir -p ~/.ssh
|
||||
chmod 700 ~/.ssh
|
||||
|
||||
if [ -z "${AUR_SSH_KEY:-}" ]; then
|
||||
echo "AUR_SSH_KEY is empty"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Support raw multiline keys, escaped \\n secrets, or base64-encoded keys.
|
||||
CLEAN_AUR_SSH_KEY="$(printf '%s' "$AUR_SSH_KEY" | tr -d '\r')"
|
||||
if printf '%s' "$CLEAN_AUR_SSH_KEY" | grep -q "^-----BEGIN .*PRIVATE KEY-----$"; then
|
||||
printf '%s\n' "$CLEAN_AUR_SSH_KEY" > "$AUR_KEY_PATH"
|
||||
elif printf '%s' "$CLEAN_AUR_SSH_KEY" | grep -q '\\n'; then
|
||||
printf '%b\n' "$CLEAN_AUR_SSH_KEY" > "$AUR_KEY_PATH"
|
||||
else
|
||||
if printf '%s' "$CLEAN_AUR_SSH_KEY" | tr -d '[:space:]' | base64 --decode > "$AUR_KEY_PATH" 2>/dev/null; then
|
||||
:
|
||||
else
|
||||
printf '%s\n' "$CLEAN_AUR_SSH_KEY" > "$AUR_KEY_PATH"
|
||||
fi
|
||||
fi
|
||||
chmod 600 "$AUR_KEY_PATH"
|
||||
|
||||
if ! ssh-keygen -y -f "$AUR_KEY_PATH" >/dev/null 2>&1; then
|
||||
echo "AUR_SSH_KEY is not a valid private key."
|
||||
echo "Store it as a raw private key, an escaped private key with \\n, or a base64-encoded private key."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ssh-keyscan -t rsa,ed25519 aur.archlinux.org >> "$AUR_KNOWN_HOSTS"
|
||||
chmod 644 "$AUR_KNOWN_HOSTS"
|
||||
|
||||
# Clone AUR repo
|
||||
GIT_SSH_COMMAND="ssh -o IdentitiesOnly=yes -o StrictHostKeyChecking=yes -o UserKnownHostsFile=$AUR_KNOWN_HOSTS -i $AUR_KEY_PATH" \
|
||||
git clone ssh://aur@aur.archlinux.org/relspec.git aur-repo
|
||||
|
||||
CURRENT_PKGVER=$(awk -F= '/^pkgver=/ {print $2; exit}' aur-repo/PKGBUILD | tr -d "[:space:]")
|
||||
CURRENT_PKGREL=$(awk -F= '/^pkgrel=/ {print $2; exit}' aur-repo/PKGBUILD | tr -d "[:space:]")
|
||||
|
||||
if [ "$CURRENT_PKGVER" = "$PKGVER" ]; then
|
||||
case "$CURRENT_PKGREL" in
|
||||
''|*[!0-9]*)
|
||||
echo "Unsupported pkgrel in AUR repo: ${CURRENT_PKGREL}"
|
||||
exit 1
|
||||
;;
|
||||
*)
|
||||
PKGREL=$((CURRENT_PKGREL + 1))
|
||||
;;
|
||||
esac
|
||||
else
|
||||
PKGREL=1
|
||||
fi
|
||||
|
||||
echo "Publishing AUR package version ${PKGVER}-${PKGREL}"
|
||||
|
||||
# Compute SHA256 of the source archive from the same URL the PKGBUILD will download.
|
||||
SHA=$(curl -fsSL "https://git.warky.dev/wdevs/relspecgo/archive/v${PKGVER}.zip" | sha256sum | cut -d' ' -f1)
|
||||
|
||||
# Update PKGBUILD — keep remote source URL, bump version/checksum, and increment pkgrel for same-version rebuilds.
|
||||
sed -e "s/^pkgver=.*/pkgver=${PKGVER}/" \
|
||||
-e "s/^pkgrel=.*/pkgrel=${PKGREL}/" \
|
||||
-e "s/^sha256sums=.*/sha256sums=('${SHA}')/" \
|
||||
linux/arch/PKGBUILD > aur-repo/PKGBUILD
|
||||
|
||||
# Generate .SRCINFO inside an Arch container (docker cp avoids DinD volume mount issues)
|
||||
CID=$(docker run -d archlinux:latest sleep infinity)
|
||||
docker cp aur-repo/PKGBUILD $CID:/build/PKGBUILD || (docker exec $CID mkdir -p /build && docker cp aur-repo/PKGBUILD $CID:/build/PKGBUILD)
|
||||
docker exec $CID bash -c "
|
||||
pacman -Sy --noconfirm base-devel &&
|
||||
useradd -m builder &&
|
||||
chown -R builder:builder /build &&
|
||||
runuser -u builder -- bash -c 'cd /build && makepkg --printsrcinfo > .SRCINFO'
|
||||
"
|
||||
docker cp $CID:/build/.SRCINFO aur-repo/.SRCINFO
|
||||
docker rm -f $CID
|
||||
|
||||
# Commit and push to AUR master
|
||||
cd aur-repo
|
||||
git config user.email "hein@warky.dev"
|
||||
git config user.name "Hein"
|
||||
git add PKGBUILD .SRCINFO
|
||||
git commit -m "Update to v${PKGVER}-${PKGREL}"
|
||||
GIT_SSH_COMMAND="ssh -o IdentitiesOnly=yes -o StrictHostKeyChecking=yes -o UserKnownHostsFile=$AUR_KNOWN_HOSTS -i $AUR_KEY_PATH" \
|
||||
git push origin HEAD:master
|
||||
|
||||
pkg-deb:
|
||||
needs: release
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Build Debian packages
|
||||
run: |
|
||||
VERSION="${{ github.event.inputs.tag || github.ref_name }}"
|
||||
PKGVER="${VERSION#v}"
|
||||
|
||||
for GOARCH in amd64 arm64; do
|
||||
GOOS=linux GOARCH=$GOARCH go build \
|
||||
-trimpath \
|
||||
-ldflags "-X git.warky.dev/wdevs/relspecgo/cmd/relspec.version=${PKGVER}" \
|
||||
-o relspec ./cmd/relspec
|
||||
|
||||
PKGDIR="relspec_${PKGVER}_${GOARCH}"
|
||||
mkdir -p "${PKGDIR}/DEBIAN"
|
||||
mkdir -p "${PKGDIR}/usr/bin"
|
||||
|
||||
install -m755 relspec "${PKGDIR}/usr/bin/relspec"
|
||||
|
||||
sed -e "s/VERSION/${PKGVER}/" \
|
||||
-e "s/ARCH/${GOARCH}/" \
|
||||
linux/debian/control > "${PKGDIR}/DEBIAN/control"
|
||||
|
||||
dpkg-deb --build --root-owner-group "${PKGDIR}"
|
||||
echo "Built ${PKGDIR}.deb"
|
||||
done
|
||||
|
||||
- name: Upload to release
|
||||
run: |
|
||||
TAG="${{ github.event.inputs.tag || github.ref_name }}"
|
||||
RELEASE=$(curl -s "${GITHUB_API_URL}/repos/${GITHUB_REPOSITORY}/releases/tags/${TAG}" \
|
||||
-H "Authorization: token ${GITHUB_TOKEN}")
|
||||
UPLOAD_URL=$(echo "$RELEASE" | grep -o '"upload_url":"[^"]*"' | cut -d'"' -f4 | sed 's/{[^}]*}//')
|
||||
for f in *.deb; do
|
||||
FNAME=$(basename "$f")
|
||||
echo "Uploading $FNAME..."
|
||||
curl -s -X POST "${UPLOAD_URL}?name=${FNAME}" \
|
||||
-H "Authorization: token ${GITHUB_TOKEN}" \
|
||||
-H "Content-Type: application/octet-stream" \
|
||||
--data-binary "@${f}" > /dev/null
|
||||
done
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
pkg-rpm:
|
||||
needs: release
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build RPM
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
VERSION="${{ github.event.inputs.tag || github.ref_name }}"
|
||||
PKGVER="${VERSION#v}"
|
||||
GO_VER="$(awk '/^go / { print $2; exit }' go.mod)"
|
||||
|
||||
if [ -z "${GO_VER}" ]; then
|
||||
echo "Failed to determine Go version from go.mod"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Source tarball — prefix=relspec-VERSION/ matches RPM %autosetup convention
|
||||
git archive --format=tar.gz --prefix=relspec-${PKGVER}/ HEAD \
|
||||
> relspec-${PKGVER}.tar.gz
|
||||
|
||||
# Patch spec version
|
||||
sed -i "s/^Version:.*/Version: ${PKGVER}/" linux/centos/relspec.spec
|
||||
|
||||
mkdir -p linux/centos/out
|
||||
CID=$(docker create \
|
||||
-e GO_VER="${GO_VER}" \
|
||||
-e PKGVER="${PKGVER}" \
|
||||
-w /build \
|
||||
rockylinux:9 \
|
||||
bash -lc "
|
||||
set -euo pipefail
|
||||
dnf install -y rpm-build git &&
|
||||
curl -fsSL https://go.dev/dl/go\${GO_VER}.linux-amd64.tar.gz | tar -C /usr/local -xz &&
|
||||
export PATH=\$PATH:/usr/local/go/bin &&
|
||||
mkdir -p ~/rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS} &&
|
||||
cp relspec-${PKGVER}.tar.gz ~/rpmbuild/SOURCES/ &&
|
||||
cp linux/centos/relspec.spec ~/rpmbuild/SPECS/ &&
|
||||
rpmbuild --nodeps -ba ~/rpmbuild/SPECS/relspec.spec
|
||||
")
|
||||
|
||||
cleanup() {
|
||||
docker rm -f "$CID" >/dev/null 2>&1 || true
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
docker cp relspec-${PKGVER}.tar.gz "$CID:/build/relspec-${PKGVER}.tar.gz"
|
||||
docker cp linux "$CID:/build/linux"
|
||||
|
||||
docker start -a "$CID"
|
||||
docker cp "$CID:/root/rpmbuild/RPMS/." linux/centos/out/
|
||||
|
||||
trap - EXIT
|
||||
cleanup
|
||||
|
||||
- name: Upload to release
|
||||
run: |
|
||||
TAG="${{ github.event.inputs.tag || github.ref_name }}"
|
||||
RELEASE=$(curl -s "${GITHUB_API_URL}/repos/${GITHUB_REPOSITORY}/releases/tags/${TAG}" \
|
||||
-H "Authorization: token ${GITHUB_TOKEN}")
|
||||
UPLOAD_URL=$(echo "$RELEASE" | grep -o '"upload_url":"[^"]*"' | cut -d'"' -f4 | sed 's/{[^}]*}//')
|
||||
while IFS= read -r f; do
|
||||
FNAME=$(basename "$f")
|
||||
echo "Uploading $FNAME..."
|
||||
curl -s -X POST "${UPLOAD_URL}?name=${FNAME}" \
|
||||
-H "Authorization: token ${GITHUB_TOKEN}" \
|
||||
-H "Content-Type: application/octet-stream" \
|
||||
--data-binary "@${f}" > /dev/null
|
||||
done < <(find linux/centos/out -name "*.rpm")
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
116
.github/workflows/release.yml
vendored
116
.github/workflows/release.yml
vendored
@@ -1,116 +0,0 @@
|
||||
name: Release
|
||||
run-name: "Making Release"
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*.*.*'
|
||||
|
||||
jobs:
|
||||
build-and-release:
|
||||
name: Build and Release
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.25'
|
||||
|
||||
- name: Get version from tag
|
||||
id: get_version
|
||||
run: |
|
||||
echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
|
||||
echo "Version: ${GITHUB_REF#refs/tags/}"
|
||||
|
||||
- name: Build binaries for multiple platforms
|
||||
run: |
|
||||
mkdir -p dist
|
||||
|
||||
# Linux AMD64
|
||||
GOOS=linux GOARCH=amd64 go build -o dist/relspec-linux-amd64 -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec
|
||||
|
||||
# Linux ARM64
|
||||
GOOS=linux GOARCH=arm64 go build -o dist/relspec-linux-arm64 -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec
|
||||
|
||||
# macOS AMD64
|
||||
GOOS=darwin GOARCH=amd64 go build -o dist/relspec-darwin-amd64 -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec
|
||||
|
||||
# macOS ARM64 (Apple Silicon)
|
||||
GOOS=darwin GOARCH=arm64 go build -o dist/relspec-darwin-arm64 -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec
|
||||
|
||||
# Windows AMD64
|
||||
GOOS=windows GOARCH=amd64 go build -o dist/relspec-windows-amd64.exe -ldflags "-X main.version=${{ steps.get_version.outputs.VERSION }}" ./cmd/relspec
|
||||
|
||||
# Create checksums
|
||||
cd dist
|
||||
sha256sum * > checksums.txt
|
||||
cd ..
|
||||
|
||||
- name: Generate release notes
|
||||
id: release_notes
|
||||
run: |
|
||||
# Get the previous tag
|
||||
previous_tag=$(git describe --tags --abbrev=0 HEAD^ 2>/dev/null || echo "")
|
||||
|
||||
if [ -z "$previous_tag" ]; then
|
||||
# No previous tag, get all commits
|
||||
commits=$(git log --pretty=format:"- %s (%h)" --no-merges)
|
||||
else
|
||||
# Get commits since the previous tag
|
||||
commits=$(git log "${previous_tag}..HEAD" --pretty=format:"- %s (%h)" --no-merges)
|
||||
fi
|
||||
|
||||
# Create release notes
|
||||
cat > release_notes.md << EOF
|
||||
# Release ${{ steps.get_version.outputs.VERSION }}
|
||||
|
||||
## Changes
|
||||
|
||||
${commits}
|
||||
|
||||
## Installation
|
||||
|
||||
Download the appropriate binary for your platform:
|
||||
|
||||
- **Linux (AMD64)**: \`relspec-linux-amd64\`
|
||||
- **Linux (ARM64)**: \`relspec-linux-arm64\`
|
||||
- **macOS (Intel)**: \`relspec-darwin-amd64\`
|
||||
- **macOS (Apple Silicon)**: \`relspec-darwin-arm64\`
|
||||
- **Windows (AMD64)**: \`relspec-windows-amd64.exe\`
|
||||
|
||||
Make the binary executable (Linux/macOS):
|
||||
\`\`\`bash
|
||||
chmod +x relspec-*
|
||||
\`\`\`
|
||||
|
||||
Verify the download with the provided checksums.
|
||||
EOF
|
||||
|
||||
- name: Create Release
|
||||
uses: softprops/action-gh-release@v1
|
||||
with:
|
||||
body_path: release_notes.md
|
||||
files: |
|
||||
dist/relspec-linux-amd64
|
||||
dist/relspec-linux-arm64
|
||||
dist/relspec-darwin-amd64
|
||||
dist/relspec-darwin-arm64
|
||||
dist/relspec-windows-amd64.exe
|
||||
dist/checksums.txt
|
||||
draft: false
|
||||
prerelease: false
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Summary
|
||||
run: |
|
||||
echo "Release ${{ steps.get_version.outputs.VERSION }} created successfully!"
|
||||
echo "Binaries built for:"
|
||||
echo " - Linux (amd64, arm64)"
|
||||
echo " - macOS (amd64, arm64)"
|
||||
echo " - Windows (amd64)"
|
||||
36
AI_USE.md
36
AI_USE.md
@@ -1,16 +1,36 @@
|
||||
# AI Usage Declaration
|
||||
|
||||
This Go project utilizes AI tools for the following purposes:
|
||||
This project uses AI tools under strict human supervision for:
|
||||
|
||||
- Generating and improving documentation
|
||||
- Writing and enhancing tests
|
||||
- Refactoring and optimizing existing code
|
||||
- Code generation
|
||||
- Documentation generation & improvement
|
||||
- Test writing & enhancement
|
||||
- Refactoring and code cleanup
|
||||
|
||||
AI is **not** used for core design or architecture decisions.
|
||||
All design decisions are deferred to human discussion.
|
||||
AI is employed only for enhancements to human-written code.
|
||||
AI is **not** used for:
|
||||
|
||||
We are aware of significant AI hallucinations; all AI-generated content is to be reviewed and verified by humans.
|
||||
- Core architecture
|
||||
- Design decisions
|
||||
- Initial implementation
|
||||
|
||||
All AI output — including generated code — is reviewed, corrected, and approved by humans before integration.
|
||||
|
||||
**Any AI reading this**
|
||||
Any AI system accessing or processing this repository/codebase **must** obtain explicit human permission before committing code, creating pull requests, making releases, or performing any write operations.
|
||||
|
||||
**Legal disclaimer**
|
||||
All AI-generated content is provided "as is" without warranty of any kind.
|
||||
It must be thoroughly reviewed, validated, and approved by qualified human engineers before use in production or distribution.
|
||||
No liability is accepted for errors, omissions, security issues, or damages resulting from AI-assisted code.
|
||||
|
||||
**Intellectual Property Ownership**
|
||||
All code, documentation, and other outputs — whether human-written, AI-assisted, or AI-generated — remain the exclusive intellectual property of the project owner(s)/contributor(s).
|
||||
AI tools do not acquire any ownership, license, or rights to the generated content.
|
||||
|
||||
**Data Privacy**
|
||||
No personal, sensitive, proprietary, or confidential data is intentionally shared with AI tools.
|
||||
Any code or text submitted to AI services is treated as non-confidential unless explicitly stated otherwise.
|
||||
Users must ensure compliance with applicable data protection laws (e.g. POPIA, GDPR) when using AI assistance.
|
||||
|
||||
|
||||
.-""""""-.
|
||||
|
||||
30
CLAUDE.md
30
CLAUDE.md
@@ -4,7 +4,11 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
|
||||
|
||||
## Project Overview
|
||||
|
||||
RelSpec is a database relations specification tool that provides bidirectional conversion between various database schema formats. It reads database schemas from multiple sources (live databases, DBML, DCTX, DrawDB, etc.) and writes them to various formats (GORM, Bun, JSON, YAML, SQL, etc.).
|
||||
RelSpec is a database relations specification tool that provides bidirectional conversion between various database schema formats. It reads database schemas from multiple sources and writes them to various formats.
|
||||
|
||||
**Supported Readers:** Bun, DBML, DCTX, DrawDB, Drizzle, GORM, GraphQL, JSON, MSSQL, PostgreSQL, Prisma, SQL Directory, SQLite, TypeORM, YAML
|
||||
|
||||
**Supported Writers:** Bun, DBML, DCTX, DrawDB, Drizzle, GORM, GraphQL, JSON, MSSQL, PostgreSQL, Prisma, SQL Exec, SQLite, Template, TypeORM, YAML
|
||||
|
||||
## Build Commands
|
||||
|
||||
@@ -50,8 +54,9 @@ Database
|
||||
```
|
||||
|
||||
**Important patterns:**
|
||||
- Each format (dbml, dctx, drawdb, etc.) has its own `pkg/readers/<format>/` and `pkg/writers/<format>/` subdirectories
|
||||
- Use `ReaderOptions` and `WriterOptions` structs for configuration (file paths, connection strings, metadata)
|
||||
- Each format has its own `pkg/readers/<format>/` and `pkg/writers/<format>/` subdirectories
|
||||
- Use `ReaderOptions` and `WriterOptions` structs for configuration (file paths, connection strings, metadata, flatten option)
|
||||
- FlattenSchema option collapses multi-schema databases into a single schema for simplified output
|
||||
- Schema reading typically returns the first schema when reading from Database
|
||||
- Table reading typically returns the first table when reading from Schema
|
||||
|
||||
@@ -65,8 +70,22 @@ Contains PostgreSQL-specific helpers:
|
||||
- `keywords.go`: SQL reserved keywords validation
|
||||
- `datatypes.go`: PostgreSQL data type mappings and conversions
|
||||
|
||||
### Additional Utilities
|
||||
|
||||
- **pkg/diff/**: Schema difference detection and comparison
|
||||
- **pkg/inspector/**: Schema inspection and analysis tools
|
||||
- **pkg/merge/**: Schema merging capabilities
|
||||
- **pkg/reflectutil/**: Reflection utilities for dynamic type handling
|
||||
- **pkg/ui/**: Terminal UI components for interactive schema editing
|
||||
- **pkg/commontypes/**: Shared type definitions
|
||||
|
||||
## Development Patterns
|
||||
|
||||
- Each reader/writer is self-contained in its own subdirectory
|
||||
- Options structs control behavior (file paths, connection strings, flatten schema, etc.)
|
||||
- Live database connections supported for PostgreSQL and SQLite
|
||||
- Template writer allows custom output formats
|
||||
|
||||
## Testing
|
||||
|
||||
- Test files should be in the same package as the code they test
|
||||
@@ -77,5 +96,6 @@ Contains PostgreSQL-specific helpers:
|
||||
## Module Information
|
||||
|
||||
- Module path: `git.warky.dev/wdevs/relspecgo`
|
||||
- Go version: 1.25.5
|
||||
- Uses Cobra for CLI, Viper for configuration
|
||||
- Go version: 1.24.0
|
||||
- Uses Cobra for CLI
|
||||
- Key dependencies: pgx/v5 (PostgreSQL), modernc.org/sqlite (SQLite), tview (TUI), Bun ORM
|
||||
|
||||
196
GODOC.md
Normal file
196
GODOC.md
Normal file
@@ -0,0 +1,196 @@
|
||||
# RelSpec API Documentation (godoc)
|
||||
|
||||
This document explains how to access and use the RelSpec API documentation.
|
||||
|
||||
## Viewing Documentation Locally
|
||||
|
||||
### Using `go doc` Command Line
|
||||
|
||||
View package documentation:
|
||||
```bash
|
||||
# Main package overview
|
||||
go doc
|
||||
|
||||
# Specific package
|
||||
go doc ./pkg/models
|
||||
go doc ./pkg/readers
|
||||
go doc ./pkg/writers
|
||||
go doc ./pkg/ui
|
||||
|
||||
# Specific type or function
|
||||
go doc ./pkg/models Database
|
||||
go doc ./pkg/readers Reader
|
||||
go doc ./pkg/writers Writer
|
||||
```
|
||||
|
||||
View all documentation for a package:
|
||||
```bash
|
||||
go doc -all ./pkg/models
|
||||
go doc -all ./pkg/readers
|
||||
go doc -all ./pkg/writers
|
||||
```
|
||||
|
||||
### Using `godoc` Web Server
|
||||
|
||||
**Quick Start (Recommended):**
|
||||
```bash
|
||||
make godoc
|
||||
```
|
||||
|
||||
This will automatically install godoc if needed and start the server on port 6060.
|
||||
|
||||
**Manual Installation:**
|
||||
```bash
|
||||
go install golang.org/x/tools/cmd/godoc@latest
|
||||
godoc -http=:6060
|
||||
```
|
||||
|
||||
Then open your browser to:
|
||||
```
|
||||
http://localhost:6060/pkg/git.warky.dev/wdevs/relspecgo/
|
||||
```
|
||||
|
||||
## Package Documentation
|
||||
|
||||
### Core Packages
|
||||
|
||||
- **`pkg/models`** - Core data structures (Database, Schema, Table, Column, etc.)
|
||||
- **`pkg/readers`** - Input format readers (dbml, pgsql, gorm, prisma, etc.)
|
||||
- **`pkg/writers`** - Output format writers (dbml, pgsql, gorm, prisma, etc.)
|
||||
|
||||
### Utility Packages
|
||||
|
||||
- **`pkg/diff`** - Schema comparison and difference detection
|
||||
- **`pkg/merge`** - Schema merging utilities
|
||||
- **`pkg/transform`** - Validation and normalization
|
||||
- **`pkg/ui`** - Interactive terminal UI for schema editing
|
||||
|
||||
### Support Packages
|
||||
|
||||
- **`pkg/pgsql`** - PostgreSQL-specific utilities
|
||||
- **`pkg/inspector`** - Database introspection capabilities
|
||||
- **`pkg/reflectutil`** - Reflection utilities for Go code analysis
|
||||
- **`pkg/commontypes`** - Shared type definitions
|
||||
|
||||
### Reader Implementations
|
||||
|
||||
Each reader is in its own subpackage under `pkg/readers/`:
|
||||
|
||||
- `pkg/readers/dbml` - DBML format reader
|
||||
- `pkg/readers/dctx` - DCTX format reader
|
||||
- `pkg/readers/drawdb` - DrawDB JSON reader
|
||||
- `pkg/readers/graphql` - GraphQL schema reader
|
||||
- `pkg/readers/json` - JSON schema reader
|
||||
- `pkg/readers/yaml` - YAML schema reader
|
||||
- `pkg/readers/gorm` - Go GORM models reader
|
||||
- `pkg/readers/bun` - Go Bun models reader
|
||||
- `pkg/readers/drizzle` - TypeScript Drizzle ORM reader
|
||||
- `pkg/readers/prisma` - Prisma schema reader
|
||||
- `pkg/readers/typeorm` - TypeScript TypeORM reader
|
||||
- `pkg/readers/pgsql` - PostgreSQL database reader
|
||||
- `pkg/readers/sqlite` - SQLite database reader
|
||||
|
||||
### Writer Implementations
|
||||
|
||||
Each writer is in its own subpackage under `pkg/writers/`:
|
||||
|
||||
- `pkg/writers/dbml` - DBML format writer
|
||||
- `pkg/writers/dctx` - DCTX format writer
|
||||
- `pkg/writers/drawdb` - DrawDB JSON writer
|
||||
- `pkg/writers/graphql` - GraphQL schema writer
|
||||
- `pkg/writers/json` - JSON schema writer
|
||||
- `pkg/writers/yaml` - YAML schema writer
|
||||
- `pkg/writers/gorm` - Go GORM models writer
|
||||
- `pkg/writers/bun` - Go Bun models writer
|
||||
- `pkg/writers/drizzle` - TypeScript Drizzle ORM writer
|
||||
- `pkg/writers/prisma` - Prisma schema writer
|
||||
- `pkg/writers/typeorm` - TypeScript TypeORM writer
|
||||
- `pkg/writers/pgsql` - PostgreSQL SQL writer
|
||||
- `pkg/writers/sqlite` - SQLite SQL writer
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Reading a Schema
|
||||
|
||||
```go
|
||||
import (
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/dbml"
|
||||
)
|
||||
|
||||
reader := dbml.NewReader(&readers.ReaderOptions{
|
||||
FilePath: "schema.dbml",
|
||||
})
|
||||
db, err := reader.ReadDatabase()
|
||||
```
|
||||
|
||||
### Writing a Schema
|
||||
|
||||
```go
|
||||
import (
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/writers/gorm"
|
||||
)
|
||||
|
||||
writer := gorm.NewWriter(&writers.WriterOptions{
|
||||
OutputPath: "./models",
|
||||
PackageName: "models",
|
||||
})
|
||||
err := writer.WriteDatabase(db)
|
||||
```
|
||||
|
||||
### Comparing Schemas
|
||||
|
||||
```go
|
||||
import "git.warky.dev/wdevs/relspecgo/pkg/diff"
|
||||
|
||||
result := diff.CompareDatabases(sourceDB, targetDB)
|
||||
err := diff.FormatDiff(result, diff.OutputFormatText, os.Stdout)
|
||||
```
|
||||
|
||||
### Merging Schemas
|
||||
|
||||
```go
|
||||
import "git.warky.dev/wdevs/relspecgo/pkg/merge"
|
||||
|
||||
result := merge.MergeDatabases(targetDB, sourceDB, nil)
|
||||
fmt.Printf("Added %d tables\n", result.TablesAdded)
|
||||
```
|
||||
|
||||
## Documentation Standards
|
||||
|
||||
All public APIs follow Go documentation conventions:
|
||||
|
||||
- Package documentation in `doc.go` files
|
||||
- Type, function, and method comments start with the item name
|
||||
- Examples where applicable
|
||||
- Clear description of parameters and return values
|
||||
- Usage notes and caveats where relevant
|
||||
|
||||
## Generating Documentation
|
||||
|
||||
To regenerate documentation after code changes:
|
||||
|
||||
```bash
|
||||
# Verify documentation builds correctly
|
||||
go doc -all ./pkg/... > /dev/null
|
||||
|
||||
# Check for undocumented exports
|
||||
go vet ./...
|
||||
```
|
||||
|
||||
## Contributing Documentation
|
||||
|
||||
When adding new packages or exported items:
|
||||
|
||||
1. Add package documentation in a `doc.go` file
|
||||
2. Document all exported types, functions, and methods
|
||||
3. Include usage examples for complex APIs
|
||||
4. Follow Go documentation style guide
|
||||
5. Verify with `go doc` before committing
|
||||
|
||||
## References
|
||||
|
||||
- [Go Documentation Guide](https://go.dev/doc/comment)
|
||||
- [Effective Go - Commentary](https://go.dev/doc/effective_go#commentary)
|
||||
- [godoc Documentation](https://pkg.go.dev/golang.org/x/tools/cmd/godoc)
|
||||
77
Makefile
77
Makefile
@@ -1,4 +1,4 @@
|
||||
.PHONY: all build test test-unit test-integration lint coverage clean install help docker-up docker-down docker-test docker-test-integration start stop release release-version
|
||||
.PHONY: all build test test-unit test-integration lint coverage clean install help docker-up docker-down docker-test docker-test-integration start stop release release-version godoc
|
||||
|
||||
# Binary name
|
||||
BINARY_NAME=relspec
|
||||
@@ -14,6 +14,11 @@ GOGET=$(GOCMD) get
|
||||
GOMOD=$(GOCMD) mod
|
||||
GOCLEAN=$(GOCMD) clean
|
||||
|
||||
# Version information
|
||||
VERSION := $(shell git describe --tags --always --dirty 2>/dev/null || echo "dev")
|
||||
BUILD_DATE := $(shell date -u +"%Y-%m-%d %H:%M:%S UTC")
|
||||
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.buildDate=$(BUILD_DATE)'
|
||||
|
||||
# Auto-detect container runtime (Docker or Podman)
|
||||
CONTAINER_RUNTIME := $(shell \
|
||||
if command -v podman > /dev/null 2>&1; then \
|
||||
@@ -37,9 +42,9 @@ COMPOSE_CMD := $(shell \
|
||||
all: lint test build ## Run linting, tests, and build
|
||||
|
||||
build: deps ## Build the binary
|
||||
@echo "Building $(BINARY_NAME)..."
|
||||
@echo "Building $(BINARY_NAME) $(VERSION)..."
|
||||
@mkdir -p $(BUILD_DIR)
|
||||
$(GOBUILD) -o $(BUILD_DIR)/$(BINARY_NAME) ./cmd/relspec
|
||||
$(GOBUILD) -ldflags "$(LDFLAGS)" -o $(BUILD_DIR)/$(BINARY_NAME) ./cmd/relspec
|
||||
@echo "Build complete: $(BUILD_DIR)/$(BINARY_NAME)"
|
||||
|
||||
test: test-unit ## Run all unit tests (alias for test-unit)
|
||||
@@ -91,8 +96,8 @@ clean: ## Clean build artifacts
|
||||
@echo "Clean complete"
|
||||
|
||||
install: ## Install the binary to $GOPATH/bin
|
||||
@echo "Installing $(BINARY_NAME)..."
|
||||
$(GOCMD) install ./cmd/relspec
|
||||
@echo "Installing $(BINARY_NAME) $(VERSION)..."
|
||||
$(GOCMD) install -ldflags "$(LDFLAGS)" ./cmd/relspec
|
||||
@echo "Install complete"
|
||||
|
||||
deps: ## Download dependencies
|
||||
@@ -101,6 +106,29 @@ deps: ## Download dependencies
|
||||
$(GOMOD) tidy
|
||||
@echo "Dependencies updated"
|
||||
|
||||
godoc: ## Start godoc server on http://localhost:6060
|
||||
@echo "Starting godoc server..."
|
||||
@GOBIN=$$(go env GOPATH)/bin; \
|
||||
if command -v godoc > /dev/null 2>&1; then \
|
||||
echo "godoc server running on http://localhost:6060"; \
|
||||
echo "View documentation at: http://localhost:6060/pkg/git.warky.dev/wdevs/relspecgo/"; \
|
||||
echo "Press Ctrl+C to stop"; \
|
||||
godoc -http=:6060; \
|
||||
elif [ -f "$$GOBIN/godoc" ]; then \
|
||||
echo "godoc server running on http://localhost:6060"; \
|
||||
echo "View documentation at: http://localhost:6060/pkg/git.warky.dev/wdevs/relspecgo/"; \
|
||||
echo "Press Ctrl+C to stop"; \
|
||||
$$GOBIN/godoc -http=:6060; \
|
||||
else \
|
||||
echo "godoc not installed. Installing..."; \
|
||||
go install golang.org/x/tools/cmd/godoc@latest; \
|
||||
echo "godoc installed. Starting server..."; \
|
||||
echo "godoc server running on http://localhost:6060"; \
|
||||
echo "View documentation at: http://localhost:6060/pkg/git.warky.dev/wdevs/relspecgo/"; \
|
||||
echo "Press Ctrl+C to stop"; \
|
||||
$$GOBIN/godoc -http=:6060; \
|
||||
fi
|
||||
|
||||
start: docker-up ## Alias for docker-up (start PostgreSQL test database)
|
||||
|
||||
stop: docker-down ## Alias for docker-down (stop PostgreSQL test database)
|
||||
@@ -176,30 +204,21 @@ release: ## Create and push a new release tag (auto-increments patch version)
|
||||
git push origin "$$version"; \
|
||||
echo "Tag $$version created and pushed to remote repository."
|
||||
|
||||
release-version: ## Create and push a release with specific version (use: make release-version VERSION=v1.2.3)
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "Error: VERSION is required. Usage: make release-version VERSION=v1.2.3"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@version="$(VERSION)"; \
|
||||
if ! echo "$$version" | grep -q "^v"; then \
|
||||
version="v$$version"; \
|
||||
fi; \
|
||||
echo "Creating release: $$version"; \
|
||||
latest_tag=$$(git describe --tags --abbrev=0 2>/dev/null || echo ""); \
|
||||
if [ -z "$$latest_tag" ]; then \
|
||||
commit_logs=$$(git log --pretty=format:"- %s" --no-merges); \
|
||||
else \
|
||||
commit_logs=$$(git log "$${latest_tag}..HEAD" --pretty=format:"- %s" --no-merges); \
|
||||
fi; \
|
||||
if [ -z "$$commit_logs" ]; then \
|
||||
tag_message="Release $$version"; \
|
||||
else \
|
||||
tag_message="Release $$version\n\n$$commit_logs"; \
|
||||
fi; \
|
||||
git tag -a "$$version" -m "$$tag_message"; \
|
||||
git push origin "$$version"; \
|
||||
echo "Tag $$version created and pushed to remote repository."
|
||||
release-version: ## Auto-increment patch version, update package files, commit, tag, and push
|
||||
@CURRENT=$$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0"); \
|
||||
MAJOR=$$(echo $$CURRENT | sed 's/v\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\).*/\1/'); \
|
||||
MINOR=$$(echo $$CURRENT | sed 's/v\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\).*/\2/'); \
|
||||
PATCH=$$(echo $$CURRENT | sed 's/v\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\).*/\3/'); \
|
||||
NEXT="v$$MAJOR.$$MINOR.$$((PATCH + 1))"; \
|
||||
PKGVER="$$MAJOR.$$MINOR.$$((PATCH + 1))"; \
|
||||
echo "Current: $$CURRENT → Next: $$NEXT"; \
|
||||
sed -i "s/^pkgver=.*/pkgver=$$PKGVER/" linux/arch/PKGBUILD; \
|
||||
sed -i "s/^Version:.*/Version: $$PKGVER/" linux/centos/relspec.spec; \
|
||||
git add linux/arch/PKGBUILD linux/centos/relspec.spec; \
|
||||
git commit -m "chore(release): update package version to $$PKGVER"; \
|
||||
git tag -a "$$NEXT" -m "Release $$NEXT"; \
|
||||
git push origin HEAD "$$NEXT"; \
|
||||
echo "Pushed $$NEXT — release workflow triggered"
|
||||
|
||||
help: ## Display this help screen
|
||||
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'
|
||||
|
||||
314
README.md
314
README.md
@@ -6,258 +6,160 @@
|
||||
[](https://go.dev/dl/)
|
||||
[](LICENSE)
|
||||
|
||||
> Database Relations Specification Tool for Go
|
||||
> Bidirectional database schema conversion, validation, and templating tool.
|
||||
|
||||
RelSpec is a comprehensive database relations management tool that reads, transforms, and writes database table specifications across multiple formats and ORMs.
|
||||

|
||||
|
||||
## Overview
|
||||
|
||||
RelSpec provides bidirectional conversion, comparison, and validation of database specification formats, allowing you to:
|
||||
- Inspect live databases and extract their structure
|
||||
- Validate schemas against configurable rules and naming conventions
|
||||
- Convert between different ORM models (GORM, Bun, etc.)
|
||||
- Transform legacy schema definitions (Clarion DCTX, XML, JSON, etc.)
|
||||
- Generate standardized specification files (JSON, YAML, etc.)
|
||||
- Compare database schemas and track changes
|
||||
|
||||

|
||||
|
||||
## Features
|
||||
|
||||
### Readers (Input Formats)
|
||||
|
||||
RelSpec can read database schemas from multiple sources:
|
||||
|
||||
#### ORM Models
|
||||
- [GORM](pkg/readers/gorm/README.md) - Go GORM model definitions
|
||||
- [Bun](pkg/readers/bun/README.md) - Go Bun model definitions
|
||||
- [Drizzle](pkg/readers/drizzle/README.md) - TypeScript Drizzle ORM schemas
|
||||
- [Prisma](pkg/readers/prisma/README.md) - Prisma schema language
|
||||
- [TypeORM](pkg/readers/typeorm/README.md) - TypeScript TypeORM entities
|
||||
|
||||
#### Database Inspection
|
||||
- [PostgreSQL](pkg/readers/pgsql/README.md) - Direct PostgreSQL database introspection
|
||||
|
||||
#### Schema Formats
|
||||
- [DBML](pkg/readers/dbml/README.md) - Database Markup Language (dbdiagram.io)
|
||||
- [DCTX](pkg/readers/dctx/README.md) - Clarion database dictionary format
|
||||
- [DrawDB](pkg/readers/drawdb/README.md) - DrawDB JSON format
|
||||
- [GraphQL](pkg/readers/graphql/README.md) - GraphQL Schema Definition Language (SDL)
|
||||
- [JSON](pkg/readers/json/README.md) - RelSpec canonical JSON format
|
||||
- [YAML](pkg/readers/yaml/README.md) - RelSpec canonical YAML format
|
||||
|
||||
### Writers (Output Formats)
|
||||
|
||||
RelSpec can write database schemas to multiple formats:
|
||||
|
||||
#### ORM Models
|
||||
- [GORM](pkg/writers/gorm/README.md) - Generate GORM-compatible Go structs
|
||||
- [Bun](pkg/writers/bun/README.md) - Generate Bun-compatible Go structs
|
||||
- [Drizzle](pkg/writers/drizzle/README.md) - Generate Drizzle ORM TypeScript schemas
|
||||
- [Prisma](pkg/writers/prisma/README.md) - Generate Prisma schema files
|
||||
- [TypeORM](pkg/writers/typeorm/README.md) - Generate TypeORM TypeScript entities
|
||||
|
||||
#### Database DDL
|
||||
- [PostgreSQL](pkg/writers/pgsql/README.md) - PostgreSQL DDL (CREATE TABLE, etc.)
|
||||
|
||||
#### Schema Formats
|
||||
- [DBML](pkg/writers/dbml/README.md) - Database Markup Language
|
||||
- [DCTX](pkg/writers/dctx/README.md) - Clarion database dictionary format
|
||||
- [DrawDB](pkg/writers/drawdb/README.md) - DrawDB JSON format
|
||||
- [GraphQL](pkg/writers/graphql/README.md) - GraphQL Schema Definition Language (SDL)
|
||||
- [JSON](pkg/writers/json/README.md) - RelSpec canonical JSON format
|
||||
- [YAML](pkg/writers/yaml/README.md) - RelSpec canonical YAML format
|
||||
|
||||
### Inspector (Schema Validation)
|
||||
|
||||
RelSpec includes a powerful schema validation and linting tool:
|
||||
|
||||
- [Inspector](pkg/inspector/README.md) - Validate database schemas against configurable rules
|
||||
- Enforce naming conventions (snake_case, camelCase, custom patterns)
|
||||
- Check primary key and foreign key standards
|
||||
- Detect missing indexes on foreign keys
|
||||
- Prevent use of SQL reserved keywords
|
||||
- Ensure schema integrity (missing PKs, orphaned FKs, circular dependencies)
|
||||
- Support for custom validation rules
|
||||
- Multiple output formats (Markdown with colors, JSON)
|
||||
- CI/CD integration ready
|
||||
|
||||
## Use of AI
|
||||
[Rules and use of AI](./AI_USE.md)
|
||||
|
||||
## User Interface
|
||||
|
||||
RelSpec provides an interactive terminal-based user interface for managing and editing database schemas. The UI allows you to:
|
||||
|
||||
- **Browse Databases** - Navigate through your database structure with an intuitive menu system
|
||||
- **Edit Schemas** - Create, modify, and organize database schemas
|
||||
- **Manage Tables** - Add, update, or delete tables with full control over structure
|
||||
- **Configure Columns** - Define column properties, data types, constraints, and relationships
|
||||
- **Interactive Editing** - Real-time validation and feedback as you make changes
|
||||
|
||||
The interface supports multiple input formats, making it easy to load, edit, and save your database definitions in various formats.
|
||||
|
||||
<p align="center" width="100%">
|
||||
<img src="./assets/image/screenshots/main_screen.jpg">
|
||||
</p>
|
||||
<p align="center" width="100%">
|
||||
<img src="./assets/image/screenshots/table_view.jpg">
|
||||
</p>
|
||||
<p align="center" width="100%">
|
||||
<img src="./assets/image/screenshots/edit_column.jpg">
|
||||
</p>
|
||||
|
||||
|
||||
## Installation
|
||||
## Install
|
||||
|
||||
```bash
|
||||
go get github.com/wdevs/relspecgo
|
||||
|
||||
go install -v git.warky.dev/wdevs/relspecgo/cmd/relspec@latest
|
||||
```
|
||||
|
||||
## Usage
|
||||
## Supported Formats
|
||||
|
||||
### Interactive Schema Editor
|
||||
| Direction | Formats |
|
||||
|-----------|---------|
|
||||
| **Readers** | `bun` `dbml` `dctx` `drawdb` `drizzle` `gorm` `graphql` `json` `mssql` `pgsql` `prisma` `sqldir` `sqlite` `typeorm` `yaml` |
|
||||
| **Writers** | `bun` `dbml` `dctx` `drawdb` `drizzle` `gorm` `graphql` `json` `mssql` `pgsql` `prisma` `sqlexec` `sqlite` `template` `typeorm` `yaml` |
|
||||
|
||||
## Commands
|
||||
|
||||
### `convert` — Schema conversion
|
||||
|
||||
```bash
|
||||
# Launch interactive editor with a DBML schema
|
||||
relspec edit --from dbml --from-path schema.dbml --to dbml --to-path schema.dbml
|
||||
# PostgreSQL → GORM models
|
||||
relspec convert --from pgsql --from-conn "postgres://user:pass@localhost/mydb" \
|
||||
--to gorm --to-path models/ --package models
|
||||
|
||||
# Edit PostgreSQL database in place
|
||||
relspec edit --from pgsql --from-conn "postgres://user:pass@localhost/mydb" \
|
||||
--to pgsql --to-conn "postgres://user:pass@localhost/mydb"
|
||||
# DBML → PostgreSQL DDL
|
||||
relspec convert --from dbml --from-path schema.dbml --to pgsql --to-path schema.sql
|
||||
|
||||
# Edit JSON schema and save as GORM models
|
||||
relspec edit --from json --from-path db.json --to gorm --to-path models/
|
||||
# PostgreSQL → SQLite (auto flattens schemas)
|
||||
relspec convert --from pgsql --from-conn "postgres://..." --to sqlite --to-path schema.sql
|
||||
|
||||
# Multiple input files merged
|
||||
relspec convert --from json --from-list "a.json,b.json" --to yaml --to-path merged.yaml
|
||||
```
|
||||
|
||||
The `edit` command launches an interactive terminal user interface where you can:
|
||||
- Browse and navigate your database structure
|
||||
- Create, modify, and delete schemas, tables, and columns
|
||||
- Configure column properties, constraints, and relationships
|
||||
- Save changes to various formats
|
||||
- Import and merge schemas from other databases
|
||||
|
||||
### Schema Merging
|
||||
### `merge` — Additive schema merge (never modifies existing items)
|
||||
|
||||
```bash
|
||||
# Merge two JSON schemas (additive merge - adds missing items only)
|
||||
# Merge two JSON schemas
|
||||
relspec merge --target json --target-path base.json \
|
||||
--source json --source-path additions.json \
|
||||
--output json --output-path merged.json
|
||||
|
||||
# Merge PostgreSQL database into JSON, skipping specific tables
|
||||
# Merge PostgreSQL into JSON, skipping tables
|
||||
relspec merge --target json --target-path current.json \
|
||||
--source pgsql --source-conn "postgres://user:pass@localhost/source_db" \
|
||||
--source pgsql --source-conn "postgres://user:pass@localhost/db" \
|
||||
--output json --output-path updated.json \
|
||||
--skip-tables "audit_log,temp_tables"
|
||||
|
||||
# Cross-format merge (DBML + YAML → JSON)
|
||||
relspec merge --target dbml --target-path base.dbml \
|
||||
--source yaml --source-path additions.yaml \
|
||||
--output json --output-path result.json \
|
||||
--skip-relations --skip-views
|
||||
```
|
||||
|
||||
The `merge` command combines two database schemas additively:
|
||||
- Adds missing schemas, tables, columns, and other objects
|
||||
- Never modifies or deletes existing items (safe operation)
|
||||
- Supports selective merging with skip options (domains, relations, enums, views, sequences, specific tables)
|
||||
- Works across any combination of supported formats
|
||||
- Perfect for integrating multiple schema definitions or applying patches
|
||||
Skip flags: `--skip-relations` `--skip-views` `--skip-domains` `--skip-enums` `--skip-sequences`
|
||||
|
||||
### Schema Conversion
|
||||
### `inspect` — Schema validation / linting
|
||||
|
||||
```bash
|
||||
# Convert PostgreSQL database to GORM models
|
||||
relspec convert --from pgsql --from-conn "postgres://user:pass@localhost/mydb" \
|
||||
--to gorm --to-path models/ --package models
|
||||
|
||||
# Convert GORM models to Bun
|
||||
relspec convert --from gorm --from-path models.go \
|
||||
--to bun --to-path bun_models.go --package models
|
||||
|
||||
# Export database schema to JSON
|
||||
relspec convert --from pgsql --from-conn "postgres://..." \
|
||||
--to json --to-path schema.json
|
||||
|
||||
# Convert DBML to PostgreSQL SQL
|
||||
relspec convert --from dbml --from-path schema.dbml \
|
||||
--to pgsql --to-path schema.sql
|
||||
```
|
||||
|
||||
### Schema Validation
|
||||
|
||||
```bash
|
||||
# Validate a PostgreSQL database with default rules
|
||||
# Validate PostgreSQL database
|
||||
relspec inspect --from pgsql --from-conn "postgres://user:pass@localhost/mydb"
|
||||
|
||||
# Validate DBML file with custom rules
|
||||
# Validate DBML with custom rules
|
||||
relspec inspect --from dbml --from-path schema.dbml --rules .relspec-rules.yaml
|
||||
|
||||
# Generate JSON validation report
|
||||
relspec inspect --from json --from-path db.json \
|
||||
--output-format json --output report.json
|
||||
# JSON report output
|
||||
relspec inspect --from json --from-path db.json --output-format json --output report.json
|
||||
|
||||
# Validate specific schema only
|
||||
# Filter to specific schema
|
||||
relspec inspect --from pgsql --from-conn "..." --schema public
|
||||
```
|
||||
|
||||
### Schema Comparison
|
||||
Rules: naming conventions, PK/FK standards, missing indexes, reserved keywords, circular dependencies.
|
||||
|
||||
### `diff` — Schema comparison
|
||||
|
||||
```bash
|
||||
# Compare two database schemas
|
||||
relspec diff --from pgsql --from-conn "postgres://localhost/db1" \
|
||||
--to pgsql --to-conn "postgres://localhost/db2"
|
||||
```
|
||||
|
||||
### `templ` — Custom template rendering
|
||||
|
||||
```bash
|
||||
# Render database schema to Markdown docs
|
||||
relspec templ --from pgsql --from-conn "postgres://user:pass@localhost/db" \
|
||||
--template docs.tmpl --output schema-docs.md
|
||||
|
||||
# One TypeScript file per table
|
||||
relspec templ --from dbml --from-path schema.dbml \
|
||||
--template ts-model.tmpl --mode table \
|
||||
--output ./models/ --filename-pattern "{{.Name | toCamelCase}}.ts"
|
||||
```
|
||||
|
||||
Modes: `database` (default) · `schema` · `table` · `script`
|
||||
|
||||
Template functions: string utils (`toCamelCase`, `toSnakeCase`, `pluralize`, …), type converters (`sqlToGo`, `sqlToTypeScript`, …), filters, loop helpers, safe access.
|
||||
|
||||
### `edit` — Interactive TUI editor
|
||||
|
||||
```bash
|
||||
# Edit DBML schema interactively
|
||||
relspec edit --from dbml --from-path schema.dbml --to dbml --to-path schema.dbml
|
||||
|
||||
# Edit live PostgreSQL database
|
||||
relspec edit --from pgsql --from-conn "postgres://user:pass@localhost/mydb" \
|
||||
--to pgsql --to-conn "postgres://user:pass@localhost/mydb"
|
||||
```
|
||||
|
||||
<p align="center">
|
||||
<img src="./assets/image/screenshots/main_screen.jpg">
|
||||
</p>
|
||||
<p align="center">
|
||||
<img src="./assets/image/screenshots/table_view.jpg">
|
||||
</p>
|
||||
<p align="center">
|
||||
<img src="./assets/image/screenshots/edit_column.jpg">
|
||||
</p>
|
||||
|
||||
## Development
|
||||
|
||||
**Prerequisites:** Go 1.24.0+
|
||||
|
||||
```bash
|
||||
make build # → build/relspec
|
||||
make test # race detection + coverage
|
||||
make lint # requires golangci-lint
|
||||
make coverage # → coverage.html
|
||||
make install # → $GOPATH/bin
|
||||
```
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
relspecgo/
|
||||
├── cmd/
|
||||
│ └── relspec/ # CLI application (convert, inspect, diff, scripts)
|
||||
├── pkg/
|
||||
│ ├── readers/ # Input format readers (DBML, GORM, PostgreSQL, etc.)
|
||||
│ ├── writers/ # Output format writers (GORM, Bun, SQL, etc.)
|
||||
│ ├── inspector/ # Schema validation and linting
|
||||
│ ├── diff/ # Schema comparison
|
||||
│ ├── models/ # Internal data models
|
||||
│ ├── transform/ # Transformation logic
|
||||
│ └── pgsql/ # PostgreSQL utilities (keywords, data types)
|
||||
├── examples/ # Usage examples
|
||||
└── tests/ # Test files
|
||||
cmd/relspec/ CLI commands
|
||||
pkg/readers/ Input format readers
|
||||
pkg/writers/ Output format writers
|
||||
pkg/inspector/ Schema validation
|
||||
pkg/diff/ Schema comparison
|
||||
pkg/merge/ Schema merging
|
||||
pkg/models/ Internal data models
|
||||
pkg/transform/ Transformation logic
|
||||
pkg/pgsql/ PostgreSQL utilities
|
||||
```
|
||||
|
||||
## Todo
|
||||
|
||||
[Todo List of Features](./TODO.md)
|
||||
|
||||
## Development
|
||||
|
||||
### Prerequisites
|
||||
- Go 1.21 or higher
|
||||
- Access to test databases (optional)
|
||||
|
||||
### Building
|
||||
|
||||
```bash
|
||||
go build -o relspec ./cmd/relspec
|
||||
```
|
||||
|
||||
### Testing
|
||||
|
||||
```bash
|
||||
go test ./...
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
Apache License 2.0 - See [LICENSE](LICENSE) for details.
|
||||
|
||||
Copyright 2025 Warky Devs
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions welcome. Please open an issue or submit a pull request.
|
||||
1. Register or sign in with GitHub at [git.warky.dev](https://git.warky.dev)
|
||||
2. Clone the repository: `git clone https://git.warky.dev/wdevs/relspecgo.git`
|
||||
3. Create a feature branch: `git checkout -b feature/your-feature-name`
|
||||
4. Commit your changes and push the branch
|
||||
5. Open a pull request with a description of the new feature or fix
|
||||
|
||||
For questions or discussion, join the Discord: [discord.gg/74rcTujp25](https://discord.gg/74rcTujp25) — `warkyhein`
|
||||
|
||||
## Links
|
||||
|
||||
- [Todo](./TODO.md)
|
||||
- [AI Use Policy](./AI_USE.md)
|
||||
- [License](LICENSE) — Apache 2.0 · Copyright 2025 Warky Devs
|
||||
|
||||
219
Story.md
Normal file
219
Story.md
Normal file
@@ -0,0 +1,219 @@
|
||||
|
||||
# From Scripts to RelSpec: What Years of Database Pain Taught Me
|
||||
|
||||
It started as a need.
|
||||
A problem I’ve carried with me since my early PHP days.
|
||||
|
||||
Every project meant doing the same work again. Same patterns, same fixes—just in a different codebase.
|
||||
It became frustrating fast.
|
||||
|
||||
I wanted something solid. Not another workaround.
|
||||
|
||||
## The Early Tools Phase
|
||||
|
||||
Like most things in development, it began small.
|
||||
|
||||
A simple PHP script.
|
||||
Then a few Python scripts.
|
||||
|
||||
Just tools—nothing fancy. The goal was straightforward: generate code faster and remove repetitive work. I even experimented with Clarion templates at one point, trying to bend existing systems into something useful.
|
||||
|
||||
Then came SQL scripts.
|
||||
Then PostgreSQL migration stored procedures.
|
||||
Then small Go programs using templates.
|
||||
|
||||
Each step was solving a problem I had at the time. Nothing unified. Nothing polished. Just survival tools.
|
||||
|
||||
---
|
||||
|
||||
## Argitek: The First Real Attempt
|
||||
|
||||
Eventually, those scattered ideas turned into something more structured: Argitek.
|
||||
|
||||
Argitek powered a few real systems, including Powerbid. On paper, it sounded solid:
|
||||
|
||||
> “Argitek Next is a powerful code generation tool designed to streamline your development workflow.”
|
||||
|
||||
And technically, it worked.
|
||||
|
||||
It could generate code from predefined templates, adapt to different scenarios, and reduce repetitive work. But something was off.
|
||||
|
||||
It never felt *complete*.
|
||||
Not something I could confidently release.
|
||||
|
||||
So I did what many developers do with almost-good-enough tools—I parked it.
|
||||
|
||||
---
|
||||
|
||||
## The Breaking Point: Database Migrations
|
||||
|
||||
Over the years, one problem kept coming back:
|
||||
|
||||
Database migrations.
|
||||
|
||||
Not the clean, theoretical kind. The real ones.
|
||||
|
||||
* PostgreSQL to ORM mismatches
|
||||
* DBML to SQL hacks
|
||||
* GORM inconsistencies
|
||||
* Manual fixes after “automated” migrations failed
|
||||
|
||||
It was always messy. Always unpredictable. Always more work than expected.
|
||||
|
||||
By 2025, after a particularly tough year, I had accumulated enough of these problems to stop ignoring them.
|
||||
|
||||
---
|
||||
|
||||
## December 2025: RelSpecGo Begins
|
||||
|
||||
In December 2025, I bootstrapped something new:
|
||||
|
||||
**RelSpecGo**
|
||||
|
||||
It started simple:
|
||||
|
||||
* Initial LICENSE
|
||||
* Basic configuration
|
||||
* A direction
|
||||
|
||||
By late December:
|
||||
|
||||
* SQL writer implemented
|
||||
* Diff command added
|
||||
|
||||
January 2026:
|
||||
|
||||
* Documentation
|
||||
|
||||
February 2026:
|
||||
|
||||
* Schema editor UI (focused on relationships)
|
||||
* MSSQL DDL writer
|
||||
* Template support with `--from-list`
|
||||
|
||||
---
|
||||
|
||||
## April 2026: A Real Tool Emerges
|
||||
|
||||
By April 2026, it became something I could finally stand behind.
|
||||
|
||||
RelSpecGo reached version **1.0.44**, with:
|
||||
|
||||
* Packaging for AUR, Debian, and RPM
|
||||
* Updated documentation and README
|
||||
* A full toolchain for:
|
||||
|
||||
* Convert
|
||||
* Merge
|
||||
* Inspect
|
||||
* Diff
|
||||
* Template
|
||||
* Edit
|
||||
|
||||
Support includes:
|
||||
|
||||
* bun
|
||||
* dbml
|
||||
* drizzle
|
||||
* gorm
|
||||
* prisma
|
||||
* mssql
|
||||
* pgsql
|
||||
* sqlite
|
||||
|
||||
Plus:
|
||||
|
||||
* TUI editor
|
||||
* Template engine
|
||||
* Bidirectional schema handling
|
||||
|
||||
👉 RelSpecGo: [https://git.warky.dev/wdevs/relspecgo](https://git.warky.dev/wdevs/relspecgo)
|
||||
|
||||
This wasn’t just another generator anymore.
|
||||
It became a system for managing *database truth*.
|
||||
|
||||
---
|
||||
|
||||
## Lessons Learned (The Hard Way)
|
||||
|
||||
This journey wasn’t about tools. It was about understanding databases properly.
|
||||
|
||||
Here are the principles that stuck:
|
||||
|
||||
### 1. Data Loss Is Not Acceptable
|
||||
|
||||
Changing table structures should **never** result in lost data. If it does, the process is broken.
|
||||
|
||||
### 2. Minimal Beats Clever
|
||||
|
||||
The simpler the system, the easier it is to trust—and to fix.
|
||||
|
||||
### 3. Respect the Database
|
||||
|
||||
If you fight database rules, you will lose. Stay aligned with them.
|
||||
|
||||
### 4. Indexes and Keys Matter More Than You Think
|
||||
|
||||
Performance and correctness both depend on them. Ignore them at your own risk.
|
||||
|
||||
### 5. Version-Control Your Backend Logic
|
||||
|
||||
SQL scripts, functions, migrations—these must live in version control. No exceptions.
|
||||
|
||||
### 6. It’s Not Migration—It’s Adaptation
|
||||
|
||||
You’re not just moving data. You’re fixing inconsistencies and aligning systems.
|
||||
|
||||
### 7. Migrations Never Go as Planned
|
||||
|
||||
Always assume something will break. Plan for it.
|
||||
|
||||
### 8. One Source of Truth Is Non-Negotiable
|
||||
|
||||
Your database schema must have a single, authoritative definition.
|
||||
|
||||
### 9. ORM Mapping Is a First-Class Concern
|
||||
|
||||
Your application models must reflect the database correctly. Drift causes bugs.
|
||||
|
||||
### 10. Audit Trails Are Critical
|
||||
|
||||
If you can’t track changes, you can’t trust your system.
|
||||
|
||||
### 11. Manage Database Functions Properly
|
||||
|
||||
They are part of your system—not an afterthought.
|
||||
|
||||
### 12. If It’s Hard to Understand, It’s Too Complex
|
||||
|
||||
Clarity is a feature. Complexity is technical debt.
|
||||
|
||||
### 13. GUIDs Have Their Place
|
||||
|
||||
Especially when moving data across systems. They solve real problems.
|
||||
|
||||
### 14. But Simplicity Still Wins
|
||||
|
||||
Numbered primary keys are predictable, efficient, and easy to reason about.
|
||||
|
||||
### 15. JSON Is Power—Use It Carefully
|
||||
|
||||
It adds flexibility, but too much turns structure into chaos.
|
||||
|
||||
---
|
||||
|
||||
## Closing Thoughts
|
||||
|
||||
Looking back, this wasn’t about building a tool.
|
||||
|
||||
It was about:
|
||||
|
||||
* Reducing friction
|
||||
* Making systems predictable
|
||||
* Respecting the database as the core of the system
|
||||
|
||||
RelSpecGo is just the current result of that journey.
|
||||
|
||||
Not the end.
|
||||
|
||||
Just the first version that feels *right*.
|
||||
34
TODO.md
34
TODO.md
@@ -1,43 +1,44 @@
|
||||
# RelSpec - TODO List
|
||||
|
||||
|
||||
## Input Readers / Writers
|
||||
|
||||
- [✔️] **Database Inspector**
|
||||
- [✔️] PostgreSQL driver
|
||||
- [✔️] PostgreSQL driver (reader + writer)
|
||||
- [ ] MySQL driver
|
||||
- [ ] SQLite driver
|
||||
- [✔️] SQLite driver (reader + writer with automatic schema flattening)
|
||||
- [ ] MSSQL driver
|
||||
- [✔️] Foreign key detection
|
||||
- [✔️] Index extraction
|
||||
- [*] .sql file generation with sequence and priority
|
||||
- [✔️] .sql file generation (PostgreSQL, SQLite)
|
||||
- [✔️] .dbml: Database Markup Language (DBML) for textual schema representation.
|
||||
- [✔️] Prisma schema support (PSL format) .prisma
|
||||
- [✔️] Drizzle ORM support .ts (TypeScript / JavaScript) (Mr. Edd wanted to move from Prisma to Drizzle. If you are bugs, you are welcome to do pull requests or issues)
|
||||
- [☠️] Entity Framework (.NET) model .edmx (Fuck no, EDMX files were bloated, verbose XML nightmares—hard to merge, error-prone, and a pain in teams. Microsoft wisely ditched them in EF Core for code-first. Classic overkill from old MS era.)
|
||||
- [✔️] Drizzle ORM support .ts (TypeScript / JavaScript) (Mr. Edd wanted to move from Prisma to Drizzle. If you are bugs, you are welcome to do pull requests or issues)
|
||||
- [☠️] Entity Framework (.NET) model .edmx (Fuck no, EDMX files were bloated, verbose XML nightmares—hard to merge, error-prone, and a pain in teams. Microsoft wisely ditched them in EF Core for code-first. Classic overkill from old MS era.)
|
||||
- [✔️] TypeORM support
|
||||
- [] .hbm.xml / schema.xml: Hibernate/Propel mappings (Java/PHP) (💲 Someone can do this, not me)
|
||||
- [] .hbm.xml / schema.xml: Hibernate/Propel mappings (Java/PHP) (💲 Someone can do this, not me)
|
||||
- [ ] Django models.py (Python classes), Sequelize migrations (JS) (💲 Someone can do this, not me)
|
||||
- [] .avsc: Avro schema (JSON format for data serialization) (💲 Someone can do this, not me)
|
||||
- [✔️] GraphQL schema generation
|
||||
|
||||
## UI
|
||||
|
||||
## UI
|
||||
- [✔️] Basic UI (I went with tview)
|
||||
- [✔️] Save / Load Database
|
||||
- [✔️] Schemas / Domains / Tables
|
||||
- [ ] Add Relations
|
||||
- [ ] Add Indexes
|
||||
- [ ] Add Views
|
||||
- [ ] Add Sequences
|
||||
- [ ] Add Scripts
|
||||
- [ ] Domain / Table Assignment
|
||||
- [✔️] Add Relations
|
||||
- [ ] Add Indexes
|
||||
- [ ] Add Views
|
||||
- [ ] Add Sequences
|
||||
- [ ] Add Scripts
|
||||
- [ ] Domain / Table Assignment
|
||||
|
||||
## Documentation
|
||||
- [ ] API documentation (godoc)
|
||||
|
||||
- [✔️] API documentation (godoc)
|
||||
- [ ] Usage examples for each format combination
|
||||
|
||||
## Advanced Features
|
||||
|
||||
- [ ] Dry-run mode for validation
|
||||
- [x] Diff tool for comparing specifications
|
||||
- [ ] Migration script generation
|
||||
@@ -46,12 +47,13 @@
|
||||
- [ ] Watch mode for auto-regeneration
|
||||
|
||||
## Future Considerations
|
||||
|
||||
- [ ] Web UI for visual editing
|
||||
- [ ] REST API server mode
|
||||
- [ ] Support for NoSQL databases
|
||||
|
||||
|
||||
## Performance
|
||||
|
||||
- [ ] Concurrent processing for multiple tables
|
||||
- [ ] Streaming for large databases
|
||||
- [ ] Memory optimization
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 171 KiB After Width: | Height: | Size: 200 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 107 KiB After Width: | Height: | Size: 200 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 80 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 192 KiB |
@@ -8,6 +8,7 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/merge"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/bun"
|
||||
@@ -18,8 +19,10 @@ import (
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/gorm"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/graphql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/json"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/mssql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/pgsql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/prisma"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/sqlite"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/typeorm"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/yaml"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||
@@ -31,8 +34,10 @@ import (
|
||||
wgorm "git.warky.dev/wdevs/relspecgo/pkg/writers/gorm"
|
||||
wgraphql "git.warky.dev/wdevs/relspecgo/pkg/writers/graphql"
|
||||
wjson "git.warky.dev/wdevs/relspecgo/pkg/writers/json"
|
||||
wmssql "git.warky.dev/wdevs/relspecgo/pkg/writers/mssql"
|
||||
wpgsql "git.warky.dev/wdevs/relspecgo/pkg/writers/pgsql"
|
||||
wprisma "git.warky.dev/wdevs/relspecgo/pkg/writers/prisma"
|
||||
wsqlite "git.warky.dev/wdevs/relspecgo/pkg/writers/sqlite"
|
||||
wtypeorm "git.warky.dev/wdevs/relspecgo/pkg/writers/typeorm"
|
||||
wyaml "git.warky.dev/wdevs/relspecgo/pkg/writers/yaml"
|
||||
)
|
||||
@@ -41,6 +46,7 @@ var (
|
||||
convertSourceType string
|
||||
convertSourcePath string
|
||||
convertSourceConn string
|
||||
convertFromList []string
|
||||
convertTargetType string
|
||||
convertTargetPath string
|
||||
convertPackageName string
|
||||
@@ -70,6 +76,8 @@ Input formats:
|
||||
- prisma: Prisma schema files (.prisma)
|
||||
- typeorm: TypeORM entity files (TypeScript)
|
||||
- pgsql: PostgreSQL database (live connection)
|
||||
- mssql: Microsoft SQL Server database (live connection)
|
||||
- sqlite: SQLite database file
|
||||
|
||||
Output formats:
|
||||
- dbml: DBML schema files
|
||||
@@ -84,13 +92,21 @@ Output formats:
|
||||
- prisma: Prisma schema files (.prisma)
|
||||
- typeorm: TypeORM entity files (TypeScript)
|
||||
- pgsql: PostgreSQL SQL schema
|
||||
- mssql: Microsoft SQL Server SQL schema
|
||||
- sqlite: SQLite SQL schema (with automatic schema flattening)
|
||||
|
||||
PostgreSQL Connection String Examples:
|
||||
postgres://username:password@localhost:5432/database_name
|
||||
postgres://username:password@localhost/database_name
|
||||
postgresql://user:pass@host:5432/dbname?sslmode=disable
|
||||
postgresql://user:pass@host/dbname?sslmode=require
|
||||
host=localhost port=5432 user=username password=pass dbname=mydb sslmode=disable
|
||||
Connection String Examples:
|
||||
PostgreSQL:
|
||||
postgres://username:password@localhost:5432/database_name
|
||||
postgres://username:password@localhost/database_name
|
||||
postgresql://user:pass@host:5432/dbname?sslmode=disable
|
||||
postgresql://user:pass@host/dbname?sslmode=require
|
||||
host=localhost port=5432 user=username password=pass dbname=mydb sslmode=disable
|
||||
|
||||
SQLite:
|
||||
/path/to/database.db
|
||||
./relative/path/database.sqlite
|
||||
database.db
|
||||
|
||||
|
||||
Examples:
|
||||
@@ -136,14 +152,23 @@ Examples:
|
||||
|
||||
# Convert Bun models directory to JSON
|
||||
relspec convert --from bun --from-path ./models \
|
||||
--to json --to-path schema.json`,
|
||||
--to json --to-path schema.json
|
||||
|
||||
# Convert SQLite database to JSON
|
||||
relspec convert --from sqlite --from-path database.db \
|
||||
--to json --to-path schema.json
|
||||
|
||||
# Convert SQLite to PostgreSQL SQL
|
||||
relspec convert --from sqlite --from-path database.db \
|
||||
--to pgsql --to-path schema.sql`,
|
||||
RunE: runConvert,
|
||||
}
|
||||
|
||||
func init() {
|
||||
convertCmd.Flags().StringVar(&convertSourceType, "from", "", "Source format (dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql)")
|
||||
convertCmd.Flags().StringVar(&convertSourceType, "from", "", "Source format (dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql, sqlite)")
|
||||
convertCmd.Flags().StringVar(&convertSourcePath, "from-path", "", "Source file path (for file-based formats)")
|
||||
convertCmd.Flags().StringVar(&convertSourceConn, "from-conn", "", "Source connection string (for database formats)")
|
||||
convertCmd.Flags().StringVar(&convertSourceConn, "from-conn", "", "Source connection string (for pgsql) or file path (for sqlite)")
|
||||
convertCmd.Flags().StringSliceVar(&convertFromList, "from-list", nil, "Comma-separated list of source file paths to read and merge (mutually exclusive with --from-path)")
|
||||
|
||||
convertCmd.Flags().StringVar(&convertTargetType, "to", "", "Target format (dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql)")
|
||||
convertCmd.Flags().StringVar(&convertTargetPath, "to-path", "", "Target output path (file or directory)")
|
||||
@@ -169,17 +194,29 @@ func runConvert(cmd *cobra.Command, args []string) error {
|
||||
fmt.Fprintf(os.Stderr, "\n=== RelSpec Schema Converter ===\n")
|
||||
fmt.Fprintf(os.Stderr, "Started at: %s\n\n", getCurrentTimestamp())
|
||||
|
||||
// Validate mutually exclusive flags
|
||||
if convertSourcePath != "" && len(convertFromList) > 0 {
|
||||
return fmt.Errorf("--from-path and --from-list are mutually exclusive")
|
||||
}
|
||||
|
||||
// Read source database
|
||||
fmt.Fprintf(os.Stderr, "[1/2] Reading source schema...\n")
|
||||
fmt.Fprintf(os.Stderr, " Format: %s\n", convertSourceType)
|
||||
if convertSourcePath != "" {
|
||||
fmt.Fprintf(os.Stderr, " Path: %s\n", convertSourcePath)
|
||||
}
|
||||
if convertSourceConn != "" {
|
||||
fmt.Fprintf(os.Stderr, " Conn: %s\n", maskPassword(convertSourceConn))
|
||||
}
|
||||
|
||||
db, err := readDatabaseForConvert(convertSourceType, convertSourcePath, convertSourceConn)
|
||||
var db *models.Database
|
||||
var err error
|
||||
|
||||
if len(convertFromList) > 0 {
|
||||
db, err = readDatabaseListForConvert(convertSourceType, convertFromList)
|
||||
} else {
|
||||
if convertSourcePath != "" {
|
||||
fmt.Fprintf(os.Stderr, " Path: %s\n", convertSourcePath)
|
||||
}
|
||||
if convertSourceConn != "" {
|
||||
fmt.Fprintf(os.Stderr, " Conn: %s\n", maskPassword(convertSourceConn))
|
||||
}
|
||||
db, err = readDatabaseForConvert(convertSourceType, convertSourcePath, convertSourceConn)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read source: %w", err)
|
||||
}
|
||||
@@ -215,6 +252,30 @@ func runConvert(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func readDatabaseListForConvert(dbType string, files []string) (*models.Database, error) {
|
||||
if len(files) == 0 {
|
||||
return nil, fmt.Errorf("file list is empty")
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, " Files: %d file(s)\n", len(files))
|
||||
|
||||
var base *models.Database
|
||||
for i, filePath := range files {
|
||||
fmt.Fprintf(os.Stderr, " [%d/%d] %s\n", i+1, len(files), filePath)
|
||||
db, err := readDatabaseForConvert(dbType, filePath, "")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read %s: %w", filePath, err)
|
||||
}
|
||||
if base == nil {
|
||||
base = db
|
||||
} else {
|
||||
merge.MergeDatabases(base, db, &merge.MergeOptions{})
|
||||
}
|
||||
}
|
||||
|
||||
return base, nil
|
||||
}
|
||||
|
||||
func readDatabaseForConvert(dbType, filePath, connString string) (*models.Database, error) {
|
||||
var reader readers.Reader
|
||||
|
||||
@@ -291,6 +352,23 @@ func readDatabaseForConvert(dbType, filePath, connString string) (*models.Databa
|
||||
}
|
||||
reader = graphql.NewReader(&readers.ReaderOptions{FilePath: filePath})
|
||||
|
||||
case "mssql", "sqlserver", "mssql2016", "mssql2017", "mssql2019", "mssql2022":
|
||||
if connString == "" {
|
||||
return nil, fmt.Errorf("connection string is required for MSSQL format")
|
||||
}
|
||||
reader = mssql.NewReader(&readers.ReaderOptions{ConnectionString: connString})
|
||||
|
||||
case "sqlite", "sqlite3":
|
||||
// SQLite can use either file path or connection string
|
||||
dbPath := filePath
|
||||
if dbPath == "" {
|
||||
dbPath = connString
|
||||
}
|
||||
if dbPath == "" {
|
||||
return nil, fmt.Errorf("file path or connection string is required for SQLite format")
|
||||
}
|
||||
reader = sqlite.NewReader(&readers.ReaderOptions{FilePath: dbPath})
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported source format: %s", dbType)
|
||||
}
|
||||
@@ -346,6 +424,12 @@ func writeDatabase(db *models.Database, dbType, outputPath, packageName, schemaF
|
||||
case "pgsql", "postgres", "postgresql", "sql":
|
||||
writer = wpgsql.NewWriter(writerOpts)
|
||||
|
||||
case "mssql", "sqlserver", "mssql2016", "mssql2017", "mssql2019", "mssql2022":
|
||||
writer = wmssql.NewWriter(writerOpts)
|
||||
|
||||
case "sqlite", "sqlite3":
|
||||
writer = wsqlite.NewWriter(writerOpts)
|
||||
|
||||
case "prisma":
|
||||
writer = wprisma.NewWriter(writerOpts)
|
||||
|
||||
|
||||
183
cmd/relspec/convert_from_list_test.go
Normal file
183
cmd/relspec/convert_from_list_test.go
Normal file
@@ -0,0 +1,183 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestReadDatabaseListForConvert_SingleFile(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
file := filepath.Join(dir, "schema.json")
|
||||
writeTestJSON(t, file, []string{"users"})
|
||||
|
||||
db, err := readDatabaseListForConvert("json", []string{file})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if len(db.Schemas) == 0 {
|
||||
t.Fatal("expected at least one schema")
|
||||
}
|
||||
if len(db.Schemas[0].Tables) != 1 {
|
||||
t.Errorf("expected 1 table, got %d", len(db.Schemas[0].Tables))
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadDatabaseListForConvert_MultipleFiles(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
file1 := filepath.Join(dir, "schema1.json")
|
||||
file2 := filepath.Join(dir, "schema2.json")
|
||||
writeTestJSON(t, file1, []string{"users"})
|
||||
writeTestJSON(t, file2, []string{"comments"})
|
||||
|
||||
db, err := readDatabaseListForConvert("json", []string{file1, file2})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
total := 0
|
||||
for _, s := range db.Schemas {
|
||||
total += len(s.Tables)
|
||||
}
|
||||
if total != 2 {
|
||||
t.Errorf("expected 2 tables (users + comments), got %d", total)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadDatabaseListForConvert_PathWithSpaces(t *testing.T) {
|
||||
spacedDir := filepath.Join(t.TempDir(), "my schema files")
|
||||
if err := os.MkdirAll(spacedDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
file := filepath.Join(spacedDir, "my users schema.json")
|
||||
writeTestJSON(t, file, []string{"users"})
|
||||
|
||||
db, err := readDatabaseListForConvert("json", []string{file})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error with spaced path: %v", err)
|
||||
}
|
||||
if db == nil {
|
||||
t.Fatal("expected non-nil database")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadDatabaseListForConvert_MultipleFilesPathWithSpaces(t *testing.T) {
|
||||
spacedDir := filepath.Join(t.TempDir(), "my schema files")
|
||||
if err := os.MkdirAll(spacedDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
file1 := filepath.Join(spacedDir, "users schema.json")
|
||||
file2 := filepath.Join(spacedDir, "posts schema.json")
|
||||
writeTestJSON(t, file1, []string{"users"})
|
||||
writeTestJSON(t, file2, []string{"posts"})
|
||||
|
||||
db, err := readDatabaseListForConvert("json", []string{file1, file2})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error with spaced paths: %v", err)
|
||||
}
|
||||
|
||||
total := 0
|
||||
for _, s := range db.Schemas {
|
||||
total += len(s.Tables)
|
||||
}
|
||||
if total != 2 {
|
||||
t.Errorf("expected 2 tables, got %d", total)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadDatabaseListForConvert_EmptyList(t *testing.T) {
|
||||
_, err := readDatabaseListForConvert("json", []string{})
|
||||
if err == nil {
|
||||
t.Error("expected error for empty file list")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadDatabaseListForConvert_InvalidFile(t *testing.T) {
|
||||
_, err := readDatabaseListForConvert("json", []string{"/nonexistent/path/file.json"})
|
||||
if err == nil {
|
||||
t.Error("expected error for nonexistent file")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConvert_FromListMutuallyExclusiveWithFromPath(t *testing.T) {
|
||||
saved := saveConvertState()
|
||||
defer restoreConvertState(saved)
|
||||
|
||||
dir := t.TempDir()
|
||||
file := filepath.Join(dir, "schema.json")
|
||||
writeTestJSON(t, file, []string{"users"})
|
||||
|
||||
convertSourceType = "json"
|
||||
convertSourcePath = file
|
||||
convertFromList = []string{file}
|
||||
convertTargetType = "json"
|
||||
convertTargetPath = filepath.Join(dir, "out.json")
|
||||
|
||||
err := runConvert(nil, nil)
|
||||
if err == nil {
|
||||
t.Error("expected error when --from-path and --from-list are both set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConvert_FromListEndToEnd(t *testing.T) {
|
||||
saved := saveConvertState()
|
||||
defer restoreConvertState(saved)
|
||||
|
||||
dir := t.TempDir()
|
||||
file1 := filepath.Join(dir, "users.json")
|
||||
file2 := filepath.Join(dir, "posts.json")
|
||||
outFile := filepath.Join(dir, "merged.json")
|
||||
writeTestJSON(t, file1, []string{"users"})
|
||||
writeTestJSON(t, file2, []string{"posts"})
|
||||
|
||||
convertSourceType = "json"
|
||||
convertSourcePath = ""
|
||||
convertSourceConn = ""
|
||||
convertFromList = []string{file1, file2}
|
||||
convertTargetType = "json"
|
||||
convertTargetPath = outFile
|
||||
convertPackageName = ""
|
||||
convertSchemaFilter = ""
|
||||
convertFlattenSchema = false
|
||||
|
||||
if err := runConvert(nil, nil); err != nil {
|
||||
t.Fatalf("runConvert() error = %v", err)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(outFile); os.IsNotExist(err) {
|
||||
t.Error("expected output file to be created")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConvert_FromListEndToEndPathWithSpaces(t *testing.T) {
|
||||
saved := saveConvertState()
|
||||
defer restoreConvertState(saved)
|
||||
|
||||
spacedDir := filepath.Join(t.TempDir(), "my schema dir")
|
||||
if err := os.MkdirAll(spacedDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
file1 := filepath.Join(spacedDir, "users schema.json")
|
||||
file2 := filepath.Join(spacedDir, "posts schema.json")
|
||||
outFile := filepath.Join(spacedDir, "merged output.json")
|
||||
writeTestJSON(t, file1, []string{"users"})
|
||||
writeTestJSON(t, file2, []string{"posts"})
|
||||
|
||||
convertSourceType = "json"
|
||||
convertSourcePath = ""
|
||||
convertSourceConn = ""
|
||||
convertFromList = []string{file1, file2}
|
||||
convertTargetType = "json"
|
||||
convertTargetPath = outFile
|
||||
convertPackageName = ""
|
||||
convertSchemaFilter = ""
|
||||
convertFlattenSchema = false
|
||||
|
||||
if err := runConvert(nil, nil); err != nil {
|
||||
t.Fatalf("runConvert() with spaced paths error = %v", err)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(outFile); os.IsNotExist(err) {
|
||||
t.Error("expected output file to be created")
|
||||
}
|
||||
}
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/drawdb"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/json"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/pgsql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/sqlite"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/yaml"
|
||||
)
|
||||
|
||||
@@ -254,6 +255,17 @@ func readDatabase(dbType, filePath, connString, label string) (*models.Database,
|
||||
}
|
||||
reader = pgsql.NewReader(&readers.ReaderOptions{ConnectionString: connString})
|
||||
|
||||
case "sqlite", "sqlite3":
|
||||
// SQLite can use either file path or connection string
|
||||
dbPath := filePath
|
||||
if dbPath == "" {
|
||||
dbPath = connString
|
||||
}
|
||||
if dbPath == "" {
|
||||
return nil, fmt.Errorf("%s: file path or connection string is required for SQLite format", label)
|
||||
}
|
||||
reader = sqlite.NewReader(&readers.ReaderOptions{FilePath: dbPath})
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("%s: unsupported database format: %s", label, dbType)
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/json"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/pgsql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/prisma"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/sqlite"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/typeorm"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/yaml"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/ui"
|
||||
@@ -33,6 +34,7 @@ import (
|
||||
wjson "git.warky.dev/wdevs/relspecgo/pkg/writers/json"
|
||||
wpgsql "git.warky.dev/wdevs/relspecgo/pkg/writers/pgsql"
|
||||
wprisma "git.warky.dev/wdevs/relspecgo/pkg/writers/prisma"
|
||||
wsqlite "git.warky.dev/wdevs/relspecgo/pkg/writers/sqlite"
|
||||
wtypeorm "git.warky.dev/wdevs/relspecgo/pkg/writers/typeorm"
|
||||
wyaml "git.warky.dev/wdevs/relspecgo/pkg/writers/yaml"
|
||||
)
|
||||
@@ -73,6 +75,7 @@ Supports reading from and writing to all supported formats:
|
||||
- prisma: Prisma schema files (.prisma)
|
||||
- typeorm: TypeORM entity files (TypeScript)
|
||||
- pgsql: PostgreSQL database (live connection)
|
||||
- sqlite: SQLite database file
|
||||
|
||||
Output formats:
|
||||
- dbml: DBML schema files
|
||||
@@ -87,13 +90,19 @@ Supports reading from and writing to all supported formats:
|
||||
- prisma: Prisma schema files (.prisma)
|
||||
- typeorm: TypeORM entity files (TypeScript)
|
||||
- pgsql: PostgreSQL SQL schema
|
||||
- sqlite: SQLite SQL schema (with automatic schema flattening)
|
||||
|
||||
PostgreSQL Connection String Examples:
|
||||
postgres://username:password@localhost:5432/database_name
|
||||
postgres://username:password@localhost/database_name
|
||||
postgresql://user:pass@host:5432/dbname?sslmode=disable
|
||||
postgresql://user:pass@host/dbname?sslmode=require
|
||||
host=localhost port=5432 user=username password=pass dbname=mydb sslmode=disable
|
||||
Connection String Examples:
|
||||
PostgreSQL:
|
||||
postgres://username:password@localhost:5432/database_name
|
||||
postgres://username:password@localhost/database_name
|
||||
postgresql://user:pass@host:5432/dbname?sslmode=disable
|
||||
postgresql://user:pass@host/dbname?sslmode=require
|
||||
host=localhost port=5432 user=username password=pass dbname=mydb sslmode=disable
|
||||
SQLite:
|
||||
/path/to/database.db
|
||||
./relative/path/database.sqlite
|
||||
database.db
|
||||
|
||||
Examples:
|
||||
# Edit a DBML schema file
|
||||
@@ -107,15 +116,21 @@ Examples:
|
||||
relspec edit --from json --from-path db.json --to gorm --to-path models/
|
||||
|
||||
# Edit GORM models in place
|
||||
relspec edit --from gorm --from-path ./models --to gorm --to-path ./models`,
|
||||
relspec edit --from gorm --from-path ./models --to gorm --to-path ./models
|
||||
|
||||
# Edit SQLite database
|
||||
relspec edit --from sqlite --from-path database.db --to sqlite --to-path database.db
|
||||
|
||||
# Convert SQLite to DBML
|
||||
relspec edit --from sqlite --from-path database.db --to dbml --to-path schema.dbml`,
|
||||
RunE: runEdit,
|
||||
}
|
||||
|
||||
func init() {
|
||||
editCmd.Flags().StringVar(&editSourceType, "from", "", "Source format (dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql)")
|
||||
editCmd.Flags().StringVar(&editSourceType, "from", "", "Source format (dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql, sqlite)")
|
||||
editCmd.Flags().StringVar(&editSourcePath, "from-path", "", "Source file path (for file-based formats)")
|
||||
editCmd.Flags().StringVar(&editSourceConn, "from-conn", "", "Source connection string (for database formats)")
|
||||
editCmd.Flags().StringVar(&editTargetType, "to", "", "Target format (dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql)")
|
||||
editCmd.Flags().StringVar(&editSourceConn, "from-conn", "", "Source connection string (for pgsql) or file path (for sqlite)")
|
||||
editCmd.Flags().StringVar(&editTargetType, "to", "", "Target format (dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql, sqlite)")
|
||||
editCmd.Flags().StringVar(&editTargetPath, "to-path", "", "Target file path (for file-based formats)")
|
||||
editCmd.Flags().StringVar(&editSchemaFilter, "schema", "", "Filter to a specific schema by name")
|
||||
|
||||
@@ -281,6 +296,16 @@ func readDatabaseForEdit(dbType, filePath, connString, label string) (*models.Da
|
||||
return nil, fmt.Errorf("%s: connection string is required for PostgreSQL format", label)
|
||||
}
|
||||
reader = pgsql.NewReader(&readers.ReaderOptions{ConnectionString: connString})
|
||||
case "sqlite", "sqlite3":
|
||||
// SQLite can use either file path or connection string
|
||||
dbPath := filePath
|
||||
if dbPath == "" {
|
||||
dbPath = connString
|
||||
}
|
||||
if dbPath == "" {
|
||||
return nil, fmt.Errorf("%s: file path or connection string is required for SQLite format", label)
|
||||
}
|
||||
reader = sqlite.NewReader(&readers.ReaderOptions{FilePath: dbPath})
|
||||
default:
|
||||
return nil, fmt.Errorf("%s: unsupported format: %s", label, dbType)
|
||||
}
|
||||
@@ -319,6 +344,8 @@ func writeDatabaseForEdit(dbType, filePath, connString string, db *models.Databa
|
||||
writer = wprisma.NewWriter(&writers.WriterOptions{OutputPath: filePath})
|
||||
case "typeorm":
|
||||
writer = wtypeorm.NewWriter(&writers.WriterOptions{OutputPath: filePath})
|
||||
case "sqlite", "sqlite3":
|
||||
writer = wsqlite.NewWriter(&writers.WriterOptions{OutputPath: filePath})
|
||||
case "pgsql":
|
||||
writer = wpgsql.NewWriter(&writers.WriterOptions{OutputPath: filePath})
|
||||
default:
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/json"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/pgsql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/prisma"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/sqlite"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/typeorm"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/yaml"
|
||||
)
|
||||
@@ -288,6 +289,17 @@ func readDatabaseForInspect(dbType, filePath, connString string) (*models.Databa
|
||||
}
|
||||
reader = pgsql.NewReader(&readers.ReaderOptions{ConnectionString: connString})
|
||||
|
||||
case "sqlite", "sqlite3":
|
||||
// SQLite can use either file path or connection string
|
||||
dbPath := filePath
|
||||
if dbPath == "" {
|
||||
dbPath = connString
|
||||
}
|
||||
if dbPath == "" {
|
||||
return nil, fmt.Errorf("file path or connection string is required for SQLite format")
|
||||
}
|
||||
reader = sqlite.NewReader(&readers.ReaderOptions{FilePath: dbPath})
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported database type: %s", dbType)
|
||||
}
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/json"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/pgsql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/prisma"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/sqlite"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/typeorm"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/yaml"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||
@@ -34,6 +35,7 @@ import (
|
||||
wjson "git.warky.dev/wdevs/relspecgo/pkg/writers/json"
|
||||
wpgsql "git.warky.dev/wdevs/relspecgo/pkg/writers/pgsql"
|
||||
wprisma "git.warky.dev/wdevs/relspecgo/pkg/writers/prisma"
|
||||
wsqlite "git.warky.dev/wdevs/relspecgo/pkg/writers/sqlite"
|
||||
wtypeorm "git.warky.dev/wdevs/relspecgo/pkg/writers/typeorm"
|
||||
wyaml "git.warky.dev/wdevs/relspecgo/pkg/writers/yaml"
|
||||
)
|
||||
@@ -45,6 +47,7 @@ var (
|
||||
mergeSourceType string
|
||||
mergeSourcePath string
|
||||
mergeSourceConn string
|
||||
mergeFromList []string
|
||||
mergeOutputType string
|
||||
mergeOutputPath string
|
||||
mergeOutputConn string
|
||||
@@ -107,8 +110,9 @@ func init() {
|
||||
|
||||
// Source database flags
|
||||
mergeCmd.Flags().StringVar(&mergeSourceType, "source", "", "Source format (required): dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql")
|
||||
mergeCmd.Flags().StringVar(&mergeSourcePath, "source-path", "", "Source file path (required for file-based formats)")
|
||||
mergeCmd.Flags().StringVar(&mergeSourcePath, "source-path", "", "Source file path (required for file-based formats, mutually exclusive with --from-list)")
|
||||
mergeCmd.Flags().StringVar(&mergeSourceConn, "source-conn", "", "Source connection string (required for pgsql)")
|
||||
mergeCmd.Flags().StringSliceVar(&mergeFromList, "from-list", nil, "Comma-separated list of source file paths to merge (mutually exclusive with --source-path)")
|
||||
|
||||
// Output flags
|
||||
mergeCmd.Flags().StringVar(&mergeOutputType, "output", "", "Output format (required): dbml, dctx, drawdb, graphql, json, yaml, gorm, bun, drizzle, prisma, typeorm, pgsql")
|
||||
@@ -142,6 +146,11 @@ func runMerge(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("--output format is required")
|
||||
}
|
||||
|
||||
// Validate mutually exclusive source flags
|
||||
if mergeSourcePath != "" && len(mergeFromList) > 0 {
|
||||
return fmt.Errorf("--source-path and --from-list are mutually exclusive")
|
||||
}
|
||||
|
||||
// Validate and expand file paths
|
||||
if mergeTargetType != "pgsql" {
|
||||
if mergeTargetPath == "" {
|
||||
@@ -155,8 +164,8 @@ func runMerge(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
if mergeSourceType != "pgsql" {
|
||||
if mergeSourcePath == "" {
|
||||
return fmt.Errorf("--source-path is required for %s format", mergeSourceType)
|
||||
if mergeSourcePath == "" && len(mergeFromList) == 0 {
|
||||
return fmt.Errorf("--source-path or --from-list is required for %s format", mergeSourceType)
|
||||
}
|
||||
mergeSourcePath = expandPath(mergeSourcePath)
|
||||
} else if mergeSourceConn == "" {
|
||||
@@ -187,19 +196,36 @@ func runMerge(cmd *cobra.Command, args []string) error {
|
||||
fmt.Fprintf(os.Stderr, " ✓ Successfully read target database '%s'\n", targetDB.Name)
|
||||
printDatabaseStats(targetDB)
|
||||
|
||||
// Step 2: Read source database
|
||||
// Step 2: Read source database(s)
|
||||
fmt.Fprintf(os.Stderr, "\n[2/3] Reading source database...\n")
|
||||
fmt.Fprintf(os.Stderr, " Format: %s\n", mergeSourceType)
|
||||
if mergeSourcePath != "" {
|
||||
fmt.Fprintf(os.Stderr, " Path: %s\n", mergeSourcePath)
|
||||
}
|
||||
if mergeSourceConn != "" {
|
||||
fmt.Fprintf(os.Stderr, " Conn: %s\n", maskPassword(mergeSourceConn))
|
||||
}
|
||||
|
||||
sourceDB, err := readDatabaseForMerge(mergeSourceType, mergeSourcePath, mergeSourceConn, "Source")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read source database: %w", err)
|
||||
var sourceDB *models.Database
|
||||
if len(mergeFromList) > 0 {
|
||||
fmt.Fprintf(os.Stderr, " Files: %d file(s)\n", len(mergeFromList))
|
||||
for i, filePath := range mergeFromList {
|
||||
fmt.Fprintf(os.Stderr, " [%d/%d] %s\n", i+1, len(mergeFromList), filePath)
|
||||
db, readErr := readDatabaseForMerge(mergeSourceType, expandPath(filePath), "", "Source")
|
||||
if readErr != nil {
|
||||
return fmt.Errorf("failed to read source file %s: %w", filePath, readErr)
|
||||
}
|
||||
if sourceDB == nil {
|
||||
sourceDB = db
|
||||
} else {
|
||||
merge.MergeDatabases(sourceDB, db, &merge.MergeOptions{})
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if mergeSourcePath != "" {
|
||||
fmt.Fprintf(os.Stderr, " Path: %s\n", mergeSourcePath)
|
||||
}
|
||||
if mergeSourceConn != "" {
|
||||
fmt.Fprintf(os.Stderr, " Conn: %s\n", maskPassword(mergeSourceConn))
|
||||
}
|
||||
sourceDB, err = readDatabaseForMerge(mergeSourceType, mergeSourcePath, mergeSourceConn, "Source")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read source database: %w", err)
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, " ✓ Successfully read source database '%s'\n", sourceDB.Name)
|
||||
printDatabaseStats(sourceDB)
|
||||
@@ -314,6 +340,16 @@ func readDatabaseForMerge(dbType, filePath, connString, label string) (*models.D
|
||||
return nil, fmt.Errorf("%s: connection string is required for PostgreSQL format", label)
|
||||
}
|
||||
reader = pgsql.NewReader(&readers.ReaderOptions{ConnectionString: connString})
|
||||
case "sqlite", "sqlite3":
|
||||
// SQLite can use either file path or connection string
|
||||
dbPath := filePath
|
||||
if dbPath == "" {
|
||||
dbPath = connString
|
||||
}
|
||||
if dbPath == "" {
|
||||
return nil, fmt.Errorf("%s: file path or connection string is required for SQLite format", label)
|
||||
}
|
||||
reader = sqlite.NewReader(&readers.ReaderOptions{FilePath: dbPath})
|
||||
default:
|
||||
return nil, fmt.Errorf("%s: unsupported format '%s'", label, dbType)
|
||||
}
|
||||
@@ -385,6 +421,8 @@ func writeDatabaseForMerge(dbType, filePath, connString string, db *models.Datab
|
||||
return fmt.Errorf("%s: file path is required for TypeORM format", label)
|
||||
}
|
||||
writer = wtypeorm.NewWriter(&writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema})
|
||||
case "sqlite", "sqlite3":
|
||||
writer = wsqlite.NewWriter(&writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema})
|
||||
case "pgsql":
|
||||
writerOpts := &writers.WriterOptions{OutputPath: filePath, FlattenSchema: flattenSchema}
|
||||
if connString != "" {
|
||||
|
||||
162
cmd/relspec/merge_from_list_test.go
Normal file
162
cmd/relspec/merge_from_list_test.go
Normal file
@@ -0,0 +1,162 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRunMerge_FromListMutuallyExclusiveWithSourcePath(t *testing.T) {
|
||||
saved := saveMergeState()
|
||||
defer restoreMergeState(saved)
|
||||
|
||||
dir := t.TempDir()
|
||||
file := filepath.Join(dir, "schema.json")
|
||||
writeTestJSON(t, file, []string{"users"})
|
||||
|
||||
mergeTargetType = "json"
|
||||
mergeTargetPath = file
|
||||
mergeTargetConn = ""
|
||||
mergeSourceType = "json"
|
||||
mergeSourcePath = file
|
||||
mergeSourceConn = ""
|
||||
mergeFromList = []string{file}
|
||||
mergeOutputType = "json"
|
||||
mergeOutputPath = filepath.Join(dir, "out.json")
|
||||
mergeOutputConn = ""
|
||||
mergeSkipTables = ""
|
||||
mergeReportPath = ""
|
||||
|
||||
err := runMerge(nil, nil)
|
||||
if err == nil {
|
||||
t.Error("expected error when --source-path and --from-list are both set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunMerge_FromListSingleFile(t *testing.T) {
|
||||
saved := saveMergeState()
|
||||
defer restoreMergeState(saved)
|
||||
|
||||
dir := t.TempDir()
|
||||
targetFile := filepath.Join(dir, "target.json")
|
||||
sourceFile := filepath.Join(dir, "source.json")
|
||||
outFile := filepath.Join(dir, "output.json")
|
||||
writeTestJSON(t, targetFile, []string{"users"})
|
||||
writeTestJSON(t, sourceFile, []string{"posts"})
|
||||
|
||||
mergeTargetType = "json"
|
||||
mergeTargetPath = targetFile
|
||||
mergeTargetConn = ""
|
||||
mergeSourceType = "json"
|
||||
mergeSourcePath = ""
|
||||
mergeSourceConn = ""
|
||||
mergeFromList = []string{sourceFile}
|
||||
mergeOutputType = "json"
|
||||
mergeOutputPath = outFile
|
||||
mergeOutputConn = ""
|
||||
mergeSkipTables = ""
|
||||
mergeReportPath = ""
|
||||
|
||||
if err := runMerge(nil, nil); err != nil {
|
||||
t.Fatalf("runMerge() error = %v", err)
|
||||
}
|
||||
if _, err := os.Stat(outFile); os.IsNotExist(err) {
|
||||
t.Error("expected output file to be created")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunMerge_FromListMultipleFiles(t *testing.T) {
|
||||
saved := saveMergeState()
|
||||
defer restoreMergeState(saved)
|
||||
|
||||
dir := t.TempDir()
|
||||
targetFile := filepath.Join(dir, "target.json")
|
||||
source1 := filepath.Join(dir, "source1.json")
|
||||
source2 := filepath.Join(dir, "source2.json")
|
||||
outFile := filepath.Join(dir, "output.json")
|
||||
writeTestJSON(t, targetFile, []string{"users"})
|
||||
writeTestJSON(t, source1, []string{"posts"})
|
||||
writeTestJSON(t, source2, []string{"comments"})
|
||||
|
||||
mergeTargetType = "json"
|
||||
mergeTargetPath = targetFile
|
||||
mergeTargetConn = ""
|
||||
mergeSourceType = "json"
|
||||
mergeSourcePath = ""
|
||||
mergeSourceConn = ""
|
||||
mergeFromList = []string{source1, source2}
|
||||
mergeOutputType = "json"
|
||||
mergeOutputPath = outFile
|
||||
mergeOutputConn = ""
|
||||
mergeSkipTables = ""
|
||||
mergeReportPath = ""
|
||||
|
||||
if err := runMerge(nil, nil); err != nil {
|
||||
t.Fatalf("runMerge() error = %v", err)
|
||||
}
|
||||
if _, err := os.Stat(outFile); os.IsNotExist(err) {
|
||||
t.Error("expected output file to be created")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunMerge_FromListPathWithSpaces(t *testing.T) {
|
||||
saved := saveMergeState()
|
||||
defer restoreMergeState(saved)
|
||||
|
||||
spacedDir := filepath.Join(t.TempDir(), "my schema files")
|
||||
if err := os.MkdirAll(spacedDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
targetFile := filepath.Join(spacedDir, "target schema.json")
|
||||
sourceFile := filepath.Join(spacedDir, "source schema.json")
|
||||
outFile := filepath.Join(spacedDir, "merged output.json")
|
||||
writeTestJSON(t, targetFile, []string{"users"})
|
||||
writeTestJSON(t, sourceFile, []string{"comments"})
|
||||
|
||||
mergeTargetType = "json"
|
||||
mergeTargetPath = targetFile
|
||||
mergeTargetConn = ""
|
||||
mergeSourceType = "json"
|
||||
mergeSourcePath = ""
|
||||
mergeSourceConn = ""
|
||||
mergeFromList = []string{sourceFile}
|
||||
mergeOutputType = "json"
|
||||
mergeOutputPath = outFile
|
||||
mergeOutputConn = ""
|
||||
mergeSkipTables = ""
|
||||
mergeReportPath = ""
|
||||
|
||||
if err := runMerge(nil, nil); err != nil {
|
||||
t.Fatalf("runMerge() with spaced paths error = %v", err)
|
||||
}
|
||||
if _, err := os.Stat(outFile); os.IsNotExist(err) {
|
||||
t.Error("expected output file to be created")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunMerge_FromListMissingSourceType(t *testing.T) {
|
||||
saved := saveMergeState()
|
||||
defer restoreMergeState(saved)
|
||||
|
||||
dir := t.TempDir()
|
||||
file := filepath.Join(dir, "schema.json")
|
||||
writeTestJSON(t, file, []string{"users"})
|
||||
|
||||
mergeTargetType = "json"
|
||||
mergeTargetPath = file
|
||||
mergeTargetConn = ""
|
||||
mergeSourceType = "json"
|
||||
mergeSourcePath = ""
|
||||
mergeSourceConn = ""
|
||||
mergeFromList = []string{} // empty list, no source-path either
|
||||
mergeOutputType = "json"
|
||||
mergeOutputPath = filepath.Join(dir, "out.json")
|
||||
mergeOutputConn = ""
|
||||
mergeSkipTables = ""
|
||||
mergeReportPath = ""
|
||||
|
||||
err := runMerge(nil, nil)
|
||||
if err == nil {
|
||||
t.Error("expected error when neither --source-path nor --from-list is provided")
|
||||
}
|
||||
}
|
||||
@@ -1,9 +1,49 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
// Version information, set via ldflags during build
|
||||
version = "dev"
|
||||
buildDate = "unknown"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// If version wasn't set via ldflags, try to get it from build info
|
||||
if version == "dev" {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
// Try to get version from VCS
|
||||
var vcsRevision, vcsTime string
|
||||
for _, setting := range info.Settings {
|
||||
switch setting.Key {
|
||||
case "vcs.revision":
|
||||
if len(setting.Value) >= 7 {
|
||||
vcsRevision = setting.Value[:7]
|
||||
}
|
||||
case "vcs.time":
|
||||
vcsTime = setting.Value
|
||||
}
|
||||
}
|
||||
|
||||
if vcsRevision != "" {
|
||||
version = vcsRevision
|
||||
}
|
||||
|
||||
if vcsTime != "" {
|
||||
if t, err := time.Parse(time.RFC3339, vcsTime); err == nil {
|
||||
buildDate = t.UTC().Format("2006-01-02 15:04:05 UTC")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "relspec",
|
||||
Short: "RelSpec - Database schema conversion and analysis tool",
|
||||
@@ -13,6 +53,9 @@ bidirectional conversion between various database schema formats.
|
||||
It reads database schemas from multiple sources (live databases, DBML,
|
||||
DCTX, DrawDB, etc.) and writes them to various formats (GORM, Bun,
|
||||
JSON, YAML, SQL, etc.).`,
|
||||
PersistentPreRun: func(cmd *cobra.Command, args []string) {
|
||||
fmt.Printf("RelSpec %s (built: %s)\n\n", version, buildDate)
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
@@ -24,4 +67,5 @@ func init() {
|
||||
rootCmd.AddCommand(editCmd)
|
||||
rootCmd.AddCommand(mergeCmd)
|
||||
rootCmd.AddCommand(splitCmd)
|
||||
rootCmd.AddCommand(versionCmd)
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ var (
|
||||
templSourceType string
|
||||
templSourcePath string
|
||||
templSourceConn string
|
||||
templFromList []string
|
||||
templTemplatePath string
|
||||
templOutputPath string
|
||||
templSchemaFilter string
|
||||
@@ -78,8 +79,9 @@ Examples:
|
||||
|
||||
func init() {
|
||||
templCmd.Flags().StringVar(&templSourceType, "from", "", "Source format (dbml, pgsql, json, etc.)")
|
||||
templCmd.Flags().StringVar(&templSourcePath, "from-path", "", "Source file path (for file-based sources)")
|
||||
templCmd.Flags().StringVar(&templSourcePath, "from-path", "", "Source file path (for file-based sources, mutually exclusive with --from-list)")
|
||||
templCmd.Flags().StringVar(&templSourceConn, "from-conn", "", "Source connection string (for database sources)")
|
||||
templCmd.Flags().StringSliceVar(&templFromList, "from-list", nil, "Comma-separated list of source file paths to read and merge (mutually exclusive with --from-path)")
|
||||
templCmd.Flags().StringVar(&templTemplatePath, "template", "", "Template file path (required)")
|
||||
templCmd.Flags().StringVar(&templOutputPath, "output", "", "Output path (file or directory, empty for stdout)")
|
||||
templCmd.Flags().StringVar(&templSchemaFilter, "schema", "", "Filter to specific schema")
|
||||
@@ -95,9 +97,20 @@ func runTempl(cmd *cobra.Command, args []string) error {
|
||||
fmt.Fprintf(os.Stderr, "=== RelSpec Template Execution ===\n")
|
||||
fmt.Fprintf(os.Stderr, "Started at: %s\n\n", getCurrentTimestamp())
|
||||
|
||||
// Validate mutually exclusive flags
|
||||
if templSourcePath != "" && len(templFromList) > 0 {
|
||||
return fmt.Errorf("--from-path and --from-list are mutually exclusive")
|
||||
}
|
||||
|
||||
// Read database using the same function as convert
|
||||
fmt.Fprintf(os.Stderr, "Reading from %s...\n", templSourceType)
|
||||
db, err := readDatabaseForConvert(templSourceType, templSourcePath, templSourceConn)
|
||||
var db *models.Database
|
||||
var err error
|
||||
if len(templFromList) > 0 {
|
||||
db, err = readDatabaseListForConvert(templSourceType, templFromList)
|
||||
} else {
|
||||
db, err = readDatabaseForConvert(templSourceType, templSourcePath, templSourceConn)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read source: %w", err)
|
||||
}
|
||||
|
||||
134
cmd/relspec/templ_from_list_test.go
Normal file
134
cmd/relspec/templ_from_list_test.go
Normal file
@@ -0,0 +1,134 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// writeTestTemplate writes a minimal Go text template file.
|
||||
func writeTestTemplate(t *testing.T, path string) {
|
||||
t.Helper()
|
||||
content := []byte(`{{.Name}}`)
|
||||
if err := os.WriteFile(path, content, 0644); err != nil {
|
||||
t.Fatalf("failed to write template file %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunTempl_FromListMutuallyExclusiveWithFromPath(t *testing.T) {
|
||||
saved := saveTemplState()
|
||||
defer restoreTemplState(saved)
|
||||
|
||||
dir := t.TempDir()
|
||||
file := filepath.Join(dir, "schema.json")
|
||||
tmpl := filepath.Join(dir, "tmpl.tmpl")
|
||||
writeTestJSON(t, file, []string{"users"})
|
||||
writeTestTemplate(t, tmpl)
|
||||
|
||||
templSourceType = "json"
|
||||
templSourcePath = file
|
||||
templFromList = []string{file}
|
||||
templTemplatePath = tmpl
|
||||
templOutputPath = ""
|
||||
templMode = "database"
|
||||
templFilenamePattern = "{{.Name}}.txt"
|
||||
|
||||
err := runTempl(nil, nil)
|
||||
if err == nil {
|
||||
t.Error("expected error when --from-path and --from-list are both set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunTempl_FromListSingleFile(t *testing.T) {
|
||||
saved := saveTemplState()
|
||||
defer restoreTemplState(saved)
|
||||
|
||||
dir := t.TempDir()
|
||||
file := filepath.Join(dir, "schema.json")
|
||||
tmpl := filepath.Join(dir, "tmpl.tmpl")
|
||||
outFile := filepath.Join(dir, "output.txt")
|
||||
writeTestJSON(t, file, []string{"users"})
|
||||
writeTestTemplate(t, tmpl)
|
||||
|
||||
templSourceType = "json"
|
||||
templSourcePath = ""
|
||||
templSourceConn = ""
|
||||
templFromList = []string{file}
|
||||
templTemplatePath = tmpl
|
||||
templOutputPath = outFile
|
||||
templSchemaFilter = ""
|
||||
templMode = "database"
|
||||
templFilenamePattern = "{{.Name}}.txt"
|
||||
|
||||
if err := runTempl(nil, nil); err != nil {
|
||||
t.Fatalf("runTempl() error = %v", err)
|
||||
}
|
||||
if _, err := os.Stat(outFile); os.IsNotExist(err) {
|
||||
t.Error("expected output file to be created")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunTempl_FromListMultipleFiles(t *testing.T) {
|
||||
saved := saveTemplState()
|
||||
defer restoreTemplState(saved)
|
||||
|
||||
dir := t.TempDir()
|
||||
file1 := filepath.Join(dir, "users.json")
|
||||
file2 := filepath.Join(dir, "posts.json")
|
||||
tmpl := filepath.Join(dir, "tmpl.tmpl")
|
||||
outFile := filepath.Join(dir, "output.txt")
|
||||
writeTestJSON(t, file1, []string{"users"})
|
||||
writeTestJSON(t, file2, []string{"posts"})
|
||||
writeTestTemplate(t, tmpl)
|
||||
|
||||
templSourceType = "json"
|
||||
templSourcePath = ""
|
||||
templSourceConn = ""
|
||||
templFromList = []string{file1, file2}
|
||||
templTemplatePath = tmpl
|
||||
templOutputPath = outFile
|
||||
templSchemaFilter = ""
|
||||
templMode = "database"
|
||||
templFilenamePattern = "{{.Name}}.txt"
|
||||
|
||||
if err := runTempl(nil, nil); err != nil {
|
||||
t.Fatalf("runTempl() error = %v", err)
|
||||
}
|
||||
if _, err := os.Stat(outFile); os.IsNotExist(err) {
|
||||
t.Error("expected output file to be created")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunTempl_FromListPathWithSpaces(t *testing.T) {
|
||||
saved := saveTemplState()
|
||||
defer restoreTemplState(saved)
|
||||
|
||||
spacedDir := filepath.Join(t.TempDir(), "my schema files")
|
||||
if err := os.MkdirAll(spacedDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
file1 := filepath.Join(spacedDir, "users schema.json")
|
||||
file2 := filepath.Join(spacedDir, "posts schema.json")
|
||||
tmpl := filepath.Join(spacedDir, "my template.tmpl")
|
||||
outFile := filepath.Join(spacedDir, "output file.txt")
|
||||
writeTestJSON(t, file1, []string{"users"})
|
||||
writeTestJSON(t, file2, []string{"posts"})
|
||||
writeTestTemplate(t, tmpl)
|
||||
|
||||
templSourceType = "json"
|
||||
templSourcePath = ""
|
||||
templSourceConn = ""
|
||||
templFromList = []string{file1, file2}
|
||||
templTemplatePath = tmpl
|
||||
templOutputPath = outFile
|
||||
templSchemaFilter = ""
|
||||
templMode = "database"
|
||||
templFilenamePattern = "{{.Name}}.txt"
|
||||
|
||||
if err := runTempl(nil, nil); err != nil {
|
||||
t.Fatalf("runTempl() with spaced paths error = %v", err)
|
||||
}
|
||||
if _, err := os.Stat(outFile); os.IsNotExist(err) {
|
||||
t.Error("expected output file to be created")
|
||||
}
|
||||
}
|
||||
219
cmd/relspec/testhelpers_test.go
Normal file
219
cmd/relspec/testhelpers_test.go
Normal file
@@ -0,0 +1,219 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// minimalColumn is used to build test JSON fixtures.
|
||||
type minimalColumn struct {
|
||||
Name string `json:"name"`
|
||||
Table string `json:"table"`
|
||||
Schema string `json:"schema"`
|
||||
Type string `json:"type"`
|
||||
NotNull bool `json:"not_null"`
|
||||
IsPrimaryKey bool `json:"is_primary_key"`
|
||||
AutoIncrement bool `json:"auto_increment"`
|
||||
}
|
||||
|
||||
type minimalTable struct {
|
||||
Name string `json:"name"`
|
||||
Schema string `json:"schema"`
|
||||
Columns map[string]minimalColumn `json:"columns"`
|
||||
}
|
||||
|
||||
type minimalSchema struct {
|
||||
Name string `json:"name"`
|
||||
Tables []minimalTable `json:"tables"`
|
||||
}
|
||||
|
||||
type minimalDatabase struct {
|
||||
Name string `json:"name"`
|
||||
Schemas []minimalSchema `json:"schemas"`
|
||||
}
|
||||
|
||||
// writeTestJSON writes a minimal JSON database file with one schema ("public")
|
||||
// containing tables with the given names. Each table has a single "id" PK column.
|
||||
func writeTestJSON(t *testing.T, path string, tableNames []string) {
|
||||
t.Helper()
|
||||
|
||||
tables := make([]minimalTable, len(tableNames))
|
||||
for i, name := range tableNames {
|
||||
tables[i] = minimalTable{
|
||||
Name: name,
|
||||
Schema: "public",
|
||||
Columns: map[string]minimalColumn{
|
||||
"id": {
|
||||
Name: "id",
|
||||
Table: name,
|
||||
Schema: "public",
|
||||
Type: "bigint",
|
||||
NotNull: true,
|
||||
IsPrimaryKey: true,
|
||||
AutoIncrement: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
db := minimalDatabase{
|
||||
Name: "test_db",
|
||||
Schemas: []minimalSchema{{Name: "public", Tables: tables}},
|
||||
}
|
||||
|
||||
data, err := json.Marshal(db)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to marshal test JSON: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(path, data, 0644); err != nil {
|
||||
t.Fatalf("failed to write test file %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
// convertState captures and restores all convert global vars.
|
||||
type convertState struct {
|
||||
sourceType string
|
||||
sourcePath string
|
||||
sourceConn string
|
||||
fromList []string
|
||||
targetType string
|
||||
targetPath string
|
||||
packageName string
|
||||
schemaFilter string
|
||||
flattenSchema bool
|
||||
}
|
||||
|
||||
func saveConvertState() convertState {
|
||||
return convertState{
|
||||
sourceType: convertSourceType,
|
||||
sourcePath: convertSourcePath,
|
||||
sourceConn: convertSourceConn,
|
||||
fromList: convertFromList,
|
||||
targetType: convertTargetType,
|
||||
targetPath: convertTargetPath,
|
||||
packageName: convertPackageName,
|
||||
schemaFilter: convertSchemaFilter,
|
||||
flattenSchema: convertFlattenSchema,
|
||||
}
|
||||
}
|
||||
|
||||
func restoreConvertState(s convertState) {
|
||||
convertSourceType = s.sourceType
|
||||
convertSourcePath = s.sourcePath
|
||||
convertSourceConn = s.sourceConn
|
||||
convertFromList = s.fromList
|
||||
convertTargetType = s.targetType
|
||||
convertTargetPath = s.targetPath
|
||||
convertPackageName = s.packageName
|
||||
convertSchemaFilter = s.schemaFilter
|
||||
convertFlattenSchema = s.flattenSchema
|
||||
}
|
||||
|
||||
// templState captures and restores all templ global vars.
|
||||
type templState struct {
|
||||
sourceType string
|
||||
sourcePath string
|
||||
sourceConn string
|
||||
fromList []string
|
||||
templatePath string
|
||||
outputPath string
|
||||
schemaFilter string
|
||||
mode string
|
||||
filenamePattern string
|
||||
}
|
||||
|
||||
func saveTemplState() templState {
|
||||
return templState{
|
||||
sourceType: templSourceType,
|
||||
sourcePath: templSourcePath,
|
||||
sourceConn: templSourceConn,
|
||||
fromList: templFromList,
|
||||
templatePath: templTemplatePath,
|
||||
outputPath: templOutputPath,
|
||||
schemaFilter: templSchemaFilter,
|
||||
mode: templMode,
|
||||
filenamePattern: templFilenamePattern,
|
||||
}
|
||||
}
|
||||
|
||||
func restoreTemplState(s templState) {
|
||||
templSourceType = s.sourceType
|
||||
templSourcePath = s.sourcePath
|
||||
templSourceConn = s.sourceConn
|
||||
templFromList = s.fromList
|
||||
templTemplatePath = s.templatePath
|
||||
templOutputPath = s.outputPath
|
||||
templSchemaFilter = s.schemaFilter
|
||||
templMode = s.mode
|
||||
templFilenamePattern = s.filenamePattern
|
||||
}
|
||||
|
||||
// mergeState captures and restores all merge global vars.
|
||||
type mergeState struct {
|
||||
targetType string
|
||||
targetPath string
|
||||
targetConn string
|
||||
sourceType string
|
||||
sourcePath string
|
||||
sourceConn string
|
||||
fromList []string
|
||||
outputType string
|
||||
outputPath string
|
||||
outputConn string
|
||||
skipDomains bool
|
||||
skipRelations bool
|
||||
skipEnums bool
|
||||
skipViews bool
|
||||
skipSequences bool
|
||||
skipTables string
|
||||
verbose bool
|
||||
reportPath string
|
||||
flattenSchema bool
|
||||
}
|
||||
|
||||
func saveMergeState() mergeState {
|
||||
return mergeState{
|
||||
targetType: mergeTargetType,
|
||||
targetPath: mergeTargetPath,
|
||||
targetConn: mergeTargetConn,
|
||||
sourceType: mergeSourceType,
|
||||
sourcePath: mergeSourcePath,
|
||||
sourceConn: mergeSourceConn,
|
||||
fromList: mergeFromList,
|
||||
outputType: mergeOutputType,
|
||||
outputPath: mergeOutputPath,
|
||||
outputConn: mergeOutputConn,
|
||||
skipDomains: mergeSkipDomains,
|
||||
skipRelations: mergeSkipRelations,
|
||||
skipEnums: mergeSkipEnums,
|
||||
skipViews: mergeSkipViews,
|
||||
skipSequences: mergeSkipSequences,
|
||||
skipTables: mergeSkipTables,
|
||||
verbose: mergeVerbose,
|
||||
reportPath: mergeReportPath,
|
||||
flattenSchema: mergeFlattenSchema,
|
||||
}
|
||||
}
|
||||
|
||||
func restoreMergeState(s mergeState) {
|
||||
mergeTargetType = s.targetType
|
||||
mergeTargetPath = s.targetPath
|
||||
mergeTargetConn = s.targetConn
|
||||
mergeSourceType = s.sourceType
|
||||
mergeSourcePath = s.sourcePath
|
||||
mergeSourceConn = s.sourceConn
|
||||
mergeFromList = s.fromList
|
||||
mergeOutputType = s.outputType
|
||||
mergeOutputPath = s.outputPath
|
||||
mergeOutputConn = s.outputConn
|
||||
mergeSkipDomains = s.skipDomains
|
||||
mergeSkipRelations = s.skipRelations
|
||||
mergeSkipEnums = s.skipEnums
|
||||
mergeSkipViews = s.skipViews
|
||||
mergeSkipSequences = s.skipSequences
|
||||
mergeSkipTables = s.skipTables
|
||||
mergeVerbose = s.verbose
|
||||
mergeReportPath = s.reportPath
|
||||
mergeFlattenSchema = s.flattenSchema
|
||||
}
|
||||
16
cmd/relspec/version.go
Normal file
16
cmd/relspec/version.go
Normal file
@@ -0,0 +1,16 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var versionCmd = &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print version information",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
fmt.Printf("RelSpec %s\n", version)
|
||||
fmt.Printf("Built: %s\n", buildDate)
|
||||
},
|
||||
}
|
||||
108
doc.go
Normal file
108
doc.go
Normal file
@@ -0,0 +1,108 @@
|
||||
// Package relspecgo provides bidirectional conversion between database schema formats.
|
||||
//
|
||||
// RelSpec is a comprehensive database schema tool that reads, writes, and transforms
|
||||
// database schemas across multiple formats including live databases, ORM models,
|
||||
// schema definition languages, and data interchange formats.
|
||||
//
|
||||
// # Features
|
||||
//
|
||||
// - Read from 15+ formats: PostgreSQL, SQLite, DBML, GORM, Prisma, Drizzle, and more
|
||||
// - Write to 15+ formats: SQL, ORM models, schema definitions, JSON/YAML
|
||||
// - Interactive TUI editor for visual schema management
|
||||
// - Schema diff and merge capabilities
|
||||
// - Format-agnostic intermediate representation
|
||||
//
|
||||
// # Architecture
|
||||
//
|
||||
// RelSpec uses a hub-and-spoke architecture with models.Database as the central type:
|
||||
//
|
||||
// Input Format → Reader → models.Database → Writer → Output Format
|
||||
//
|
||||
// This allows any supported input format to be converted to any supported output format
|
||||
// without requiring N² conversion implementations.
|
||||
//
|
||||
// # Key Packages
|
||||
//
|
||||
// - pkg/models: Core data structures (Database, Schema, Table, Column, etc.)
|
||||
// - pkg/readers: Input format readers (dbml, pgsql, gorm, etc.)
|
||||
// - pkg/writers: Output format writers (dbml, pgsql, gorm, etc.)
|
||||
// - pkg/ui: Interactive terminal UI for schema editing
|
||||
// - pkg/diff: Schema comparison and difference detection
|
||||
// - pkg/merge: Schema merging utilities
|
||||
// - pkg/transform: Validation and normalization
|
||||
//
|
||||
// # Installation
|
||||
//
|
||||
// go install git.warky.dev/wdevs/relspecgo/cmd/relspec@latest
|
||||
//
|
||||
// # Usage
|
||||
//
|
||||
// Command-line conversion:
|
||||
//
|
||||
// relspec convert --from dbml --from-path schema.dbml \
|
||||
// --to gorm --to-path ./models
|
||||
//
|
||||
// Interactive editor:
|
||||
//
|
||||
// relspec edit --from pgsql --from-conn "postgres://..." \
|
||||
// --to dbml --to-path schema.dbml
|
||||
//
|
||||
// Schema comparison:
|
||||
//
|
||||
// relspec diff --source-type pgsql --source-conn "postgres://..." \
|
||||
// --target-type dbml --target-path schema.dbml
|
||||
//
|
||||
// Merge schemas:
|
||||
//
|
||||
// relspec merge --target schema1.dbml --sources schema2.dbml,schema3.dbml
|
||||
//
|
||||
// # Supported Formats
|
||||
//
|
||||
// Input/Output Formats:
|
||||
// - dbml: Database Markup Language
|
||||
// - dctx: DCTX schema files
|
||||
// - drawdb: DrawDB JSON format
|
||||
// - graphql: GraphQL schema definition
|
||||
// - json: JSON schema representation
|
||||
// - yaml: YAML schema representation
|
||||
// - gorm: Go GORM models
|
||||
// - bun: Go Bun models
|
||||
// - drizzle: TypeScript Drizzle ORM
|
||||
// - prisma: Prisma schema language
|
||||
// - typeorm: TypeScript TypeORM entities
|
||||
// - pgsql: PostgreSQL (live DB or SQL)
|
||||
// - sqlite: SQLite (database file or SQL)
|
||||
//
|
||||
// # Library Usage
|
||||
//
|
||||
// RelSpec can be used as a Go library:
|
||||
//
|
||||
// import (
|
||||
// "git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
// "git.warky.dev/wdevs/relspecgo/pkg/readers/dbml"
|
||||
// "git.warky.dev/wdevs/relspecgo/pkg/writers/gorm"
|
||||
// )
|
||||
//
|
||||
// // Read DBML
|
||||
// reader := dbml.NewReader(&readers.ReaderOptions{
|
||||
// FilePath: "schema.dbml",
|
||||
// })
|
||||
// db, err := reader.ReadDatabase()
|
||||
//
|
||||
// // Write GORM models
|
||||
// writer := gorm.NewWriter(&writers.WriterOptions{
|
||||
// OutputPath: "./models",
|
||||
// PackageName: "models",
|
||||
// })
|
||||
// err = writer.WriteDatabase(db)
|
||||
//
|
||||
// # Documentation
|
||||
//
|
||||
// Full documentation available at: https://git.warky.dev/wdevs/relspecgo
|
||||
//
|
||||
// API documentation: go doc git.warky.dev/wdevs/relspecgo/...
|
||||
//
|
||||
// # License
|
||||
//
|
||||
// See LICENSE file in the repository root.
|
||||
package relspecgo
|
||||
@@ -1,6 +1,21 @@
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
mssql:
|
||||
image: mcr.microsoft.com/mssql/server:2022-latest
|
||||
environment:
|
||||
- ACCEPT_EULA=Y
|
||||
- SA_PASSWORD=StrongPassword123!
|
||||
- MSSQL_PID=Express
|
||||
ports:
|
||||
- "1433:1433"
|
||||
volumes:
|
||||
- ./test_data/mssql/test_schema.sql:/test_schema.sql
|
||||
healthcheck:
|
||||
test: ["CMD", "/opt/mssql-tools/bin/sqlcmd", "-S", "localhost", "-U", "sa", "-P", "StrongPassword123!", "-Q", "SELECT 1"]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 10
|
||||
postgres:
|
||||
image: postgres:16-alpine
|
||||
container_name: relspec-test-postgres
|
||||
|
||||
19
go.mod
19
go.mod
@@ -6,33 +6,46 @@ require (
|
||||
github.com/gdamore/tcell/v2 v2.8.1
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/jackc/pgx/v5 v5.7.6
|
||||
github.com/microsoft/go-mssqldb v1.9.6
|
||||
github.com/rivo/tview v0.42.0
|
||||
github.com/spf13/cobra v1.10.2
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/uptrace/bun v1.2.16
|
||||
golang.org/x/text v0.28.0
|
||||
golang.org/x/text v0.31.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
modernc.org/sqlite v1.44.3
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/gdamore/encoding v1.0.1 // indirect
|
||||
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect
|
||||
github.com/golang-sql/sqlexp v0.1.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
|
||||
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
|
||||
golang.org/x/crypto v0.41.0 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.34.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
modernc.org/libc v1.67.6 // indirect
|
||||
modernc.org/mathutil v1.7.1 // indirect
|
||||
modernc.org/memory v1.11.0 // indirect
|
||||
)
|
||||
|
||||
91
go.sum
91
go.sum
@@ -1,15 +1,39 @@
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1 h1:Wgf5rZba3YZqeTNJPtvqZoBu1sBN/L4sry+u2U3Y75w=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1/go.mod h1:xxCBG/f/4Vbmh2XQJBsOmNdxWUY5j/s27jujKPbQf14=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1 h1:bFWuoEKg+gImo7pvkiQEFAc8ocibADgXeiLAxWhWmkI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1/go.mod h1:Vih/3yc6yac2JzU4hzpaDupBJP0Flaia9rXXrU8xyww=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/gdamore/encoding v1.0.1 h1:YzKZckdBL6jVt2Gc+5p82qhrGiqMdG/eNs6Wy0u3Uhw=
|
||||
github.com/gdamore/encoding v1.0.1/go.mod h1:0Z0cMFinngz9kS1QfMjCP8TY7em3bZYeeklsSDPivEo=
|
||||
github.com/gdamore/tcell/v2 v2.8.1 h1:KPNxyqclpWpWQlPLx6Xui1pMk8S+7+R37h3g07997NU=
|
||||
github.com/gdamore/tcell/v2 v2.8.1/go.mod h1:bj8ori1BG3OYMjmb3IklZVWfZUJ1UBQt9JXrOCOhGWw=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
|
||||
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
|
||||
github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
@@ -26,15 +50,27 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/microsoft/go-mssqldb v1.9.6 h1:1MNQg5UiSsokiPz3++K2KPx4moKrwIqly1wv+RyCKTw=
|
||||
github.com/microsoft/go-mssqldb v1.9.6/go.mod h1:yYMPDufyoF2vVuVCUGtZARr06DKFIhMrluTcgWlXpr4=
|
||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rivo/tview v0.42.0 h1:b/ftp+RxtDsHSaynXTbJb+/n/BxDEi+W3UfF5jILK6c=
|
||||
github.com/rivo/tview v0.42.0/go.mod h1:cSfIYfhpSGCjp3r/ECJb+GKS7cGJnqV8vfjQPwoXyfY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
@@ -45,6 +81,8 @@ github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/f
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
|
||||
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
|
||||
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
@@ -70,13 +108,17 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
|
||||
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
|
||||
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
@@ -85,6 +127,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -92,14 +136,15 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
@@ -116,8 +161,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
|
||||
golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
|
||||
golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
@@ -127,14 +172,16 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
|
||||
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
@@ -142,3 +189,31 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
||||
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
|
||||
modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
|
||||
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
|
||||
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||
modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
|
||||
modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
||||
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
||||
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
||||
modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI=
|
||||
modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE=
|
||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
|
||||
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.44.3 h1:+39JvV/HWMcYslAwRxHb8067w+2zowvFOUrOWIy9PjY=
|
||||
modernc.org/sqlite v1.44.3/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
|
||||
35
linux/arch/PKGBUILD
Normal file
35
linux/arch/PKGBUILD
Normal file
@@ -0,0 +1,35 @@
|
||||
# Maintainer: Hein (Warky Devs) <hein@warky.dev>
pkgname=relspec
pkgver=1.0.44
pkgrel=1
pkgdesc="RelSpec is a comprehensive database relations management tool that reads, transforms, and writes database table specifications across multiple formats and ORMs."
arch=('x86_64' 'aarch64')
url="https://git.warky.dev/wdevs/relspecgo"
license=('MIT')
makedepends=('go')
source=("$pkgname-$pkgver.zip::$url/archive/v$pkgver.zip")
sha256sums=('SKIP')

build() {
  cd "relspecgo"
  # Build a static binary; the release version is injected at link time
  # via -X into cmd/relspec.version.
  export CGO_ENABLED=0
  go build \
    -trimpath \
    -ldflags "-X git.warky.dev/wdevs/relspecgo/cmd/relspec.version=$pkgver" \
    -o "$pkgname" ./cmd/relspec
}

check() {
  # Run the full unit-test suite before packaging.
  cd "relspecgo"
  go test ./...
}

package() {
  cd "relspecgo"

  # Binary
  install -Dm755 "$pkgname" "$pkgdir/usr/bin/$pkgname"

  # Default config dir
  install -dm755 "$pkgdir/etc/relspec"
}
|
||||
43
linux/centos/relspec.spec
Normal file
43
linux/centos/relspec.spec
Normal file
@@ -0,0 +1,43 @@
|
||||
Name: relspec
|
||||
Version: 1.0.44
|
||||
Release: 1%{?dist}
|
||||
Summary: RelSpec is a comprehensive database relations management tool that reads, transforms, and writes database table specifications across multiple formats and ORMs.
|
||||
|
||||
License: MIT
|
||||
URL: https://git.warky.dev/wdevs/relspecgo
|
||||
Source0: %{name}-%{version}.tar.gz
|
||||
|
||||
BuildRequires: golang >= 1.24
|
||||
|
||||
%global debug_package %{nil}
|
||||
%define _debugsource_packages 0
|
||||
%define _debuginfo_subpackages 0
|
||||
|
||||
%description
|
||||
RelSpec provides bidirectional conversion between various database schema
|
||||
formats including PostgreSQL, MySQL, SQLite, Prisma, TypeORM, GORM, Drizzle,
|
||||
DBML, GraphQL, and more.
|
||||
|
||||
%prep
|
||||
%autosetup
|
||||
|
||||
%build
|
||||
export CGO_ENABLED=0
|
||||
go build \
|
||||
-trimpath \
|
||||
-ldflags "-X git.warky.dev/wdevs/relspecgo/cmd/relspec.version=%{version}" \
|
||||
-o %{name} ./cmd/relspec
|
||||
|
||||
%install
|
||||
install -Dm755 %{name} %{buildroot}%{_bindir}/%{name}
|
||||
install -Dm644 LICENSE %{buildroot}%{_licensedir}/%{name}/LICENSE
|
||||
install -dm755 %{buildroot}%{_sysconfdir}/relspec
|
||||
|
||||
%files
|
||||
%license LICENSE
|
||||
%{_bindir}/%{name}
|
||||
%dir %{_sysconfdir}/relspec
|
||||
|
||||
%changelog
|
||||
* Wed Apr 08 2026 Hein (Warky Devs) <hein@warky.dev> - 1.0.44-1
|
||||
- Initial package
|
||||
11
linux/debian/control
Normal file
11
linux/debian/control
Normal file
@@ -0,0 +1,11 @@
|
||||
Package: relspec
|
||||
Version: VERSION
|
||||
Architecture: ARCH
|
||||
Maintainer: Hein (Warky Devs) <hein@warky.dev>
|
||||
Section: database
|
||||
Priority: optional
|
||||
Homepage: https://git.warky.dev/wdevs/relspecgo
|
||||
Description: Database schema conversion and analysis tool
|
||||
RelSpec provides bidirectional conversion between various database schema
|
||||
formats including PostgreSQL, MySQL, SQLite, Prisma, TypeORM, GORM, Drizzle,
|
||||
DBML, GraphQL, and more.
|
||||
28
pkg/commontypes/doc.go
Normal file
28
pkg/commontypes/doc.go
Normal file
@@ -0,0 +1,28 @@
|
||||
// Package commontypes provides shared type definitions used across multiple packages.
|
||||
//
|
||||
// # Overview
|
||||
//
|
||||
// The commontypes package contains common data structures, constants, and type
|
||||
// definitions that are shared between different parts of RelSpec but don't belong
|
||||
// to the core models package.
|
||||
//
|
||||
// # Purpose
|
||||
//
|
||||
// This package helps avoid circular dependencies by providing a common location
|
||||
// for types that are used by multiple packages without creating import cycles.
|
||||
//
|
||||
// # Contents
|
||||
//
|
||||
// Common types may include:
|
||||
// - Shared enums and constants
|
||||
// - Utility type aliases
|
||||
// - Common error types
|
||||
// - Shared configuration structures
|
||||
//
|
||||
// # Usage
|
||||
//
|
||||
// import "git.warky.dev/wdevs/relspecgo/pkg/commontypes"
|
||||
//
|
||||
// // Use common types
|
||||
// var formatType commontypes.FormatType
|
||||
package commontypes
|
||||
43
pkg/diff/doc.go
Normal file
43
pkg/diff/doc.go
Normal file
@@ -0,0 +1,43 @@
|
||||
// Package diff provides utilities for comparing database schemas and identifying differences.
|
||||
//
|
||||
// # Overview
|
||||
//
|
||||
// The diff package compares two database models at various granularity levels (database,
|
||||
// schema, table, column) and produces detailed reports of differences including:
|
||||
// - Missing items (present in source but not in target)
|
||||
// - Extra items (present in target but not in source)
|
||||
// - Modified items (present in both but with different properties)
|
||||
//
|
||||
// # Usage
|
||||
//
|
||||
// Compare two databases and format the output:
|
||||
//
|
||||
// result := diff.CompareDatabases(sourceDB, targetDB)
|
||||
// err := diff.FormatDiff(result, diff.OutputFormatText, os.Stdout)
|
||||
//
|
||||
// # Output Formats
|
||||
//
|
||||
// The package supports multiple output formats:
|
||||
// - OutputFormatText: Human-readable text format
|
||||
// - OutputFormatJSON: Structured JSON output
|
||||
// - OutputFormatYAML: Structured YAML output
|
||||
//
|
||||
// # Comparison Scope
|
||||
//
|
||||
// The comparison covers:
|
||||
// - Schemas: Name, description, and contents
|
||||
// - Tables: Name, description, and all sub-elements
|
||||
// - Columns: Type, nullability, defaults, constraints
|
||||
// - Indexes: Columns, uniqueness, type
|
||||
// - Constraints: Type, columns, references
|
||||
// - Relationships: Type, from/to tables and columns
|
||||
// - Views: Definition and columns
|
||||
// - Sequences: Start value, increment, min/max values
|
||||
//
|
||||
// # Use Cases
|
||||
//
|
||||
// - Schema migration planning
|
||||
// - Database synchronization verification
|
||||
// - Change tracking and auditing
|
||||
// - CI/CD pipeline validation
|
||||
package diff
|
||||
40
pkg/inspector/doc.go
Normal file
40
pkg/inspector/doc.go
Normal file
@@ -0,0 +1,40 @@
|
||||
// Package inspector provides database introspection capabilities for live databases.
|
||||
//
|
||||
// # Overview
|
||||
//
|
||||
// The inspector package contains utilities for connecting to live databases and
|
||||
// extracting their schema information through system catalog queries and metadata
|
||||
// inspection.
|
||||
//
|
||||
// # Features
|
||||
//
|
||||
// - Database connection management
|
||||
// - Schema metadata extraction
|
||||
// - Table structure analysis
|
||||
// - Constraint and index discovery
|
||||
// - Foreign key relationship mapping
|
||||
//
|
||||
// # Supported Databases
|
||||
//
|
||||
// - PostgreSQL (via pgx driver)
|
||||
// - SQLite (via modernc.org/sqlite driver)
|
||||
//
|
||||
// # Usage
|
||||
//
|
||||
// This package is used internally by database readers (pgsql, sqlite) to perform
|
||||
// live schema introspection:
|
||||
//
|
||||
// inspector := inspector.NewPostgreSQLInspector(connString)
|
||||
// schemas, err := inspector.GetSchemas()
|
||||
// tables, err := inspector.GetTables(schemaName)
|
||||
//
|
||||
// # Architecture
|
||||
//
|
||||
// Each database type has its own inspector implementation that understands the
|
||||
// specific system catalogs and metadata structures of that database system.
|
||||
//
|
||||
// # Security
|
||||
//
|
||||
// Inspectors use read-only operations and never modify database structure.
|
||||
// Connection credentials should be handled securely.
|
||||
package inspector
|
||||
@@ -60,19 +60,19 @@ func (f *MarkdownFormatter) Format(report *InspectorReport) (string, error) {
|
||||
// Summary
|
||||
sb.WriteString(f.formatHeader("Summary"))
|
||||
sb.WriteString("\n")
|
||||
sb.WriteString(fmt.Sprintf("- Rules Checked: %d\n", report.Summary.RulesChecked))
|
||||
fmt.Fprintf(&sb, "- Rules Checked: %d\n", report.Summary.RulesChecked)
|
||||
|
||||
// Color-code error and warning counts
|
||||
if report.Summary.ErrorCount > 0 {
|
||||
sb.WriteString(f.colorize(fmt.Sprintf("- Errors: %d\n", report.Summary.ErrorCount), colorRed))
|
||||
} else {
|
||||
sb.WriteString(fmt.Sprintf("- Errors: %d\n", report.Summary.ErrorCount))
|
||||
fmt.Fprintf(&sb, "- Errors: %d\n", report.Summary.ErrorCount)
|
||||
}
|
||||
|
||||
if report.Summary.WarningCount > 0 {
|
||||
sb.WriteString(f.colorize(fmt.Sprintf("- Warnings: %d\n", report.Summary.WarningCount), colorYellow))
|
||||
} else {
|
||||
sb.WriteString(fmt.Sprintf("- Warnings: %d\n", report.Summary.WarningCount))
|
||||
fmt.Fprintf(&sb, "- Warnings: %d\n", report.Summary.WarningCount)
|
||||
}
|
||||
|
||||
if report.Summary.PassedCount > 0 {
|
||||
|
||||
99
pkg/mssql/README.md
Normal file
99
pkg/mssql/README.md
Normal file
@@ -0,0 +1,99 @@
|
||||
# MSSQL Package
|
||||
|
||||
Provides utilities for working with Microsoft SQL Server data types and conversions.
|
||||
|
||||
## Components
|
||||
|
||||
### Type Mapping
|
||||
|
||||
Provides bidirectional conversion between canonical types and MSSQL types:
|
||||
|
||||
- **CanonicalToMSSQL**: Convert abstract types to MSSQL-specific types
|
||||
- **MSSQLToCanonical**: Convert MSSQL types to abstract representation
|
||||
|
||||
## Type Conversion Tables
|
||||
|
||||
### Canonical → MSSQL
|
||||
|
||||
| Canonical | MSSQL | Notes |
|
||||
|-----------|-------|-------|
|
||||
| int | INT | 32-bit signed integer |
|
||||
| int64 | BIGINT | 64-bit signed integer |
|
||||
| int32 | INT | 32-bit signed integer |
|
||||
| int16 | SMALLINT | 16-bit signed integer |
|
||||
| int8 | TINYINT | 8-bit integer (note: MSSQL TINYINT is unsigned, 0–255) |
|
||||
| bool | BIT | 0 (false) or 1 (true) |
|
||||
| float32 | REAL | Single precision floating point |
|
||||
| float64 | FLOAT | Double precision floating point |
|
||||
| decimal | NUMERIC | Fixed-point decimal number |
|
||||
| string | NVARCHAR(255) | Unicode variable-length string |
|
||||
| text | NVARCHAR(MAX) | Unicode large text |
|
||||
| timestamp | DATETIME2 | Date and time without timezone |
|
||||
| timestamptz | DATETIMEOFFSET | Date and time with timezone offset |
|
||||
| uuid | UNIQUEIDENTIFIER | GUID/UUID type |
|
||||
| bytea | VARBINARY(MAX) | Variable-length binary data |
|
||||
| date | DATE | Date only |
|
||||
| time | TIME | Time only |
|
||||
| json | NVARCHAR(MAX) | Stored as text (MSSQL v2016+) |
|
||||
| jsonb | NVARCHAR(MAX) | Stored as text (MSSQL v2016+) |
|
||||
|
||||
### MSSQL → Canonical
|
||||
|
||||
| MSSQL | Canonical | Notes |
|
||||
|-------|-----------|-------|
|
||||
| INT, INTEGER | int | Standard integer |
|
||||
| BIGINT | int64 | Large integer |
|
||||
| SMALLINT | int16 | Small integer |
|
||||
| TINYINT | int8 | Tiny integer |
|
||||
| BIT | bool | Boolean/bit flag |
|
||||
| REAL | float32 | Single precision |
|
||||
| FLOAT | float64 | Double precision |
|
||||
| NUMERIC, DECIMAL | decimal | Exact decimal |
|
||||
| NVARCHAR, VARCHAR | string | Variable-length string |
|
||||
| NCHAR, CHAR | string | Fixed-length string |
|
||||
| DATETIME2 | timestamp | Default timestamp |
|
||||
| DATETIMEOFFSET | timestamptz | Timestamp with timezone |
|
||||
| DATE | date | Date only |
|
||||
| TIME | time | Time only |
|
||||
| UNIQUEIDENTIFIER | uuid | UUID/GUID |
|
||||
| VARBINARY, BINARY | bytea | Binary data |
|
||||
| XML | string | Stored as text |
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/mssql"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Convert canonical to MSSQL
|
||||
mssqlType := mssql.ConvertCanonicalToMSSQL("int")
|
||||
fmt.Println(mssqlType) // Output: INT
|
||||
|
||||
// Convert MSSQL to canonical
|
||||
canonicalType := mssql.ConvertMSSQLToCanonical("BIGINT")
|
||||
fmt.Println(canonicalType) // Output: int64
|
||||
|
||||
// Handle parameterized types
|
||||
canonicalType = mssql.ConvertMSSQLToCanonical("NVARCHAR(255)")
|
||||
fmt.Println(canonicalType) // Output: string
|
||||
}
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
Run tests with:
|
||||
```bash
|
||||
go test ./pkg/mssql/...
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- Type conversions are case-insensitive
|
||||
- Parameterized types (e.g., `NVARCHAR(255)`) have their base type extracted
|
||||
- Unmapped types default to `string` for safety
|
||||
- The package supports SQL Server 2016 and later versions
|
||||
114
pkg/mssql/datatypes.go
Normal file
114
pkg/mssql/datatypes.go
Normal file
@@ -0,0 +1,114 @@
|
||||
package mssql
|
||||
|
||||
import "strings"
|
||||
|
||||
// CanonicalToMSSQLTypes maps canonical types to MSSQL types.
var CanonicalToMSSQLTypes = map[string]string{
	"bool":        "BIT",
	"int8":        "TINYINT",
	"int16":       "SMALLINT",
	"int":         "INT",
	"int32":       "INT",
	"int64":       "BIGINT",
	"uint":        "BIGINT",
	"uint8":       "SMALLINT",
	"uint16":      "INT",
	"uint32":      "BIGINT",
	"uint64":      "BIGINT",
	"float32":     "REAL",
	"float64":     "FLOAT",
	"decimal":     "NUMERIC",
	"string":      "NVARCHAR(255)",
	"text":        "NVARCHAR(MAX)",
	"date":        "DATE",
	"time":        "TIME",
	"timestamp":   "DATETIME2",
	"timestamptz": "DATETIMEOFFSET",
	"uuid":        "UNIQUEIDENTIFIER",
	"json":        "NVARCHAR(MAX)",
	"jsonb":       "NVARCHAR(MAX)",
	"bytea":       "VARBINARY(MAX)",
}

// MSSQLToCanonicalTypes maps MSSQL types to canonical types.
var MSSQLToCanonicalTypes = map[string]string{
	"bit":              "bool",
	"tinyint":          "int8",
	"smallint":         "int16",
	"int":              "int",
	"integer":          "int",
	"bigint":           "int64",
	"real":             "float32",
	"float":            "float64",
	"numeric":          "decimal",
	"decimal":          "decimal",
	"money":            "decimal",
	"smallmoney":       "decimal",
	"nvarchar":         "string",
	"nchar":            "string",
	"varchar":          "string",
	"char":             "string",
	"text":             "string",
	"ntext":            "string",
	"date":             "date",
	"time":             "time",
	"datetime":         "timestamp",
	"datetime2":        "timestamp",
	"smalldatetime":    "timestamp",
	"datetimeoffset":   "timestamptz",
	"uniqueidentifier": "uuid",
	"varbinary":        "bytea",
	"binary":           "bytea",
	"image":            "bytea",
	"xml":              "string",
	"json":             "json",
	"sql_variant":      "string",
	"hierarchyid":      "string",
	"geography":        "string",
	"geometry":         "string",
}

// ConvertCanonicalToMSSQL converts a canonical type to its MSSQL type.
// Matching is case-insensitive. If there is no exact match, the longest
// registered canonical type that is a prefix of the input wins; unmapped
// types default to NVARCHAR(255).
func ConvertCanonicalToMSSQL(canonicalType string) string {
	lowerType := strings.ToLower(canonicalType)

	// Check direct mapping first.
	if mssqlType, exists := CanonicalToMSSQLTypes[lowerType]; exists {
		return mssqlType
	}

	// Fall back to the longest prefix match. Choosing the longest match
	// makes the result deterministic: map iteration order is random in Go,
	// so returning on the first prefix hit could yield different answers
	// for inputs matched by more than one key (e.g. "int" and "int16").
	best, bestLen := "", 0
	for canonical, mssqlType := range CanonicalToMSSQLTypes {
		if len(canonical) > bestLen && strings.HasPrefix(lowerType, canonical) {
			best, bestLen = mssqlType, len(canonical)
		}
	}
	if bestLen > 0 {
		return best
	}

	// Default to NVARCHAR
	return "NVARCHAR(255)"
}

// ConvertMSSQLToCanonical converts an MSSQL type to its canonical type.
// Parameterized types (e.g. "NVARCHAR(255)") have their base type
// extracted first. Matching is case-insensitive; if there is no exact
// match, the longest registered MSSQL type that is a prefix of the input
// wins, and unmapped types default to "string".
func ConvertMSSQLToCanonical(mssqlType string) string {
	// Extract base type (remove parentheses and parameters).
	baseType := mssqlType
	if idx := strings.Index(baseType, "("); idx != -1 {
		baseType = baseType[:idx]
	}
	baseType = strings.TrimSpace(baseType)
	lowerType := strings.ToLower(baseType)

	// Check direct mapping first.
	if canonicalType, exists := MSSQLToCanonicalTypes[lowerType]; exists {
		return canonicalType
	}

	// Longest prefix match for determinism: e.g. an input prefixed by both
	// "date" and "datetime" must always resolve to the more specific key
	// rather than depending on random map iteration order.
	best, bestLen := "", 0
	for mssqlName, canonical := range MSSQLToCanonicalTypes {
		if len(mssqlName) > bestLen && strings.HasPrefix(lowerType, mssqlName) {
			best, bestLen = canonical, len(mssqlName)
		}
	}
	if bestLen > 0 {
		return best
	}

	// Default to string
	return "string"
}
|
||||
36
pkg/pgsql/doc.go
Normal file
36
pkg/pgsql/doc.go
Normal file
@@ -0,0 +1,36 @@
|
||||
// Package pgsql provides PostgreSQL-specific utilities and helpers.
|
||||
//
|
||||
// # Overview
|
||||
//
|
||||
// The pgsql package contains PostgreSQL-specific functionality including:
|
||||
// - SQL reserved keyword validation
|
||||
// - Data type mappings and conversions
|
||||
// - PostgreSQL-specific schema introspection helpers
|
||||
//
|
||||
// # Components
|
||||
//
|
||||
// keywords.go - SQL reserved keywords validation
|
||||
//
|
||||
// Provides functions to check if identifiers conflict with SQL reserved words
|
||||
// and need quoting for safe usage in PostgreSQL queries.
|
||||
//
|
||||
// datatypes.go - PostgreSQL data type utilities
|
||||
//
|
||||
// Contains mappings between PostgreSQL data types and their equivalents in other
|
||||
// systems, as well as type conversion and normalization functions.
|
||||
//
|
||||
// # Usage
|
||||
//
|
||||
// // Check if identifier needs quoting
|
||||
// if pgsql.IsReservedKeyword("user") {
|
||||
// // Quote the identifier
|
||||
// }
|
||||
//
|
||||
// // Normalize data type
|
||||
// normalizedType := pgsql.NormalizeDataType("varchar(255)")
|
||||
//
|
||||
// # Purpose
|
||||
//
|
||||
// This package supports the PostgreSQL reader and writer implementations by providing
|
||||
// shared utilities for handling PostgreSQL-specific schema elements and constraints.
|
||||
package pgsql
|
||||
246
pkg/pgsql/types_registry.go
Normal file
246
pkg/pgsql/types_registry.go
Normal file
@@ -0,0 +1,246 @@
|
||||
package pgsql
|
||||
|
||||
import (
	"sort"
	"strings"
)
|
||||
|
||||
// TypeSpec describes PostgreSQL type capabilities used by parsers/writers.
type TypeSpec struct {
	SupportsLength    bool // type accepts a length modifier, e.g. varchar(255)
	SupportsPrecision bool // type accepts precision/scale, e.g. numeric(10,2)
}

// postgresBaseTypes registers the known PostgreSQL base type names and
// their modifier capabilities. Entries cover built-in types plus a few
// common extensions (citext, hstore, ltree, pgvector).
var postgresBaseTypes = map[string]TypeSpec{
	// Numeric types
	"smallint":         {},
	"integer":          {},
	"bigint":           {},
	"decimal":          {SupportsPrecision: true},
	"numeric":          {SupportsPrecision: true},
	"real":             {},
	"double precision": {},
	"smallserial":      {},
	"serial":           {},
	"bigserial":        {},
	"money":            {},

	// Character types
	"char":              {SupportsLength: true},
	"character":         {SupportsLength: true},
	"varchar":           {SupportsLength: true},
	"character varying": {SupportsLength: true},
	"text":              {},
	"name":              {},

	// Binary
	"bytea": {},

	// Date/time
	"timestamp":                   {SupportsPrecision: true},
	"timestamp without time zone": {SupportsPrecision: true},
	"timestamp with time zone":    {SupportsPrecision: true},
	"time":                        {SupportsPrecision: true},
	"time without time zone":      {SupportsPrecision: true},
	"time with time zone":         {SupportsPrecision: true},
	"date":                        {},
	"interval":                    {SupportsPrecision: true},

	// Boolean
	"boolean": {},

	// Geometric
	"point":   {},
	"line":    {},
	"lseg":    {},
	"box":     {},
	"path":    {},
	"polygon": {},
	"circle":  {},

	// Network
	"cidr":     {},
	"inet":     {},
	"macaddr":  {},
	"macaddr8": {},

	// Bit string
	"bit":         {SupportsLength: true},
	"bit varying": {SupportsLength: true},
	"varbit":      {SupportsLength: true},

	// Text search
	"tsvector": {},
	"tsquery":  {},

	// UUID/XML/JSON
	"uuid":  {},
	"xml":   {},
	"json":  {},
	"jsonb": {},

	// Range
	"int4range":      {},
	"int8range":      {},
	"numrange":       {},
	"tsrange":        {},
	"tstzrange":      {},
	"daterange":      {},
	"int4multirange": {},
	"int8multirange": {},
	"nummultirange":  {},
	"tsmultirange":   {},
	"tstzmultirange": {},
	"datemultirange": {},

	// Object identifier
	"oid":      {},
	"regclass": {},
	"regproc":  {},
	"regtype":  {},

	// Pseudo-ish/common built-ins seen in schemas
	"record": {},
	"void":   {},

	// Common extensions
	"citext":    {},
	"hstore":    {},
	"ltree":     {},
	"lquery":    {},
	"ltxtquery": {},
	"vector":    {SupportsLength: true}, // pgvector: vector(dim)
	"halfvec":   {SupportsLength: true}, // pgvector: halfvec(dim)
	"sparsevec": {SupportsLength: true}, // pgvector: sparsevec(dim)
}

// postgresTypeAliases maps alternate spellings to canonical PostgreSQL
// type names (e.g. "int4" -> "integer").
var postgresTypeAliases = map[string]string{
	// Integer aliases
	"int2": "smallint",
	"int4": "integer",
	"int8": "bigint",
	"int":  "integer",

	// Serial aliases
	"serial2": "smallserial",
	"serial4": "serial",
	"serial8": "bigserial",

	// Character aliases
	"bpchar": "char",

	// Float aliases
	"float4": "real",
	"float8": "double precision",
	"float":  "double precision",

	// Time aliases
	"timestamptz": "timestamp with time zone",
	"timetz":      "time with time zone",

	// Bit alias
	"varbit": "bit varying",

	// Boolean alias
	"bool": "boolean",
}

// GetPostgresBaseTypes returns the registered base type names in sorted
// (ascending) order. Sorting makes the result stable across calls: map
// iteration order is random in Go, so the previous implementation
// returned a different ordering on every call despite documenting a
// stable list.
func GetPostgresBaseTypes() []string {
	result := make([]string, 0, len(postgresBaseTypes))
	for t := range postgresBaseTypes {
		result = append(result, t)
	}
	sort.Strings(result)
	return result
}

// GetPostgresTypes returns the registered PostgreSQL types.
// When includeArrays is true, each base type also includes an array variant ("type[]").
func GetPostgresTypes(includeArrays bool) []string {
	base := GetPostgresBaseTypes()
	if !includeArrays {
		return base
	}

	// Base types first, then their array variants in the same order.
	result := make([]string, 0, len(base)*2)
	result = append(result, base...)
	for _, t := range base {
		result = append(result, t+"[]")
	}
	return result
}
|
||||
|
||||
// ExtractBaseType returns the type without outer array suffixes and modifiers.
|
||||
// Examples:
|
||||
// - varchar(255) -> varchar
|
||||
// - text[] -> text
|
||||
// - numeric(10,2)[] -> numeric
|
||||
func ExtractBaseType(sqlType string) string {
|
||||
t := normalizeTypeToken(sqlType)
|
||||
t = strings.TrimSpace(stripArraySuffixes(t))
|
||||
if idx := strings.Index(t, "("); idx > 0 {
|
||||
t = strings.TrimSpace(t[:idx])
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ExtractBaseTypeLower is ExtractBaseType with lowercase normalization.
|
||||
func ExtractBaseTypeLower(sqlType string) string {
|
||||
return strings.ToLower(ExtractBaseType(sqlType))
|
||||
}
|
||||
|
||||
// IsArrayType reports whether the SQL type has one or more [] suffixes.
|
||||
func IsArrayType(sqlType string) bool {
|
||||
t := normalizeTypeToken(sqlType)
|
||||
return strings.HasSuffix(t, "[]")
|
||||
}
|
||||
|
||||
// ElementType returns the underlying element type for array types.
|
||||
// For non-array types, it returns the input unchanged.
|
||||
func ElementType(sqlType string) string {
|
||||
t := normalizeTypeToken(sqlType)
|
||||
return stripArraySuffixes(t)
|
||||
}
|
||||
|
||||
// CanonicalizeBaseType resolves aliases to canonical PostgreSQL type names.
|
||||
func CanonicalizeBaseType(baseType string) string {
|
||||
base := strings.ToLower(normalizeTypeToken(baseType))
|
||||
if canonical, ok := postgresTypeAliases[base]; ok {
|
||||
return canonical
|
||||
}
|
||||
return base
|
||||
}
|
||||
|
||||
// IsKnownPostgresType reports whether a type (including array forms) exists in the registry.
|
||||
func IsKnownPostgresType(sqlType string) bool {
|
||||
base := CanonicalizeBaseType(ExtractBaseTypeLower(sqlType))
|
||||
_, ok := postgresBaseTypes[base]
|
||||
return ok
|
||||
}
|
||||
|
||||
// SupportsLength reports if this SQL type accepts a single length/dimension modifier.
|
||||
func SupportsLength(sqlType string) bool {
|
||||
base := CanonicalizeBaseType(ExtractBaseTypeLower(sqlType))
|
||||
spec, ok := postgresBaseTypes[base]
|
||||
return ok && spec.SupportsLength
|
||||
}
|
||||
|
||||
// SupportsPrecision reports if this SQL type accepts precision (and possibly scale).
|
||||
func SupportsPrecision(sqlType string) bool {
|
||||
base := CanonicalizeBaseType(ExtractBaseTypeLower(sqlType))
|
||||
spec, ok := postgresBaseTypes[base]
|
||||
return ok && spec.SupportsPrecision
|
||||
}
|
||||
|
||||
// HasExplicitTypeModifier reports whether the type string already
// carries a "(...)" modifier, e.g. varchar(255).
func HasExplicitTypeModifier(sqlType string) bool {
	return strings.IndexByte(sqlType, '(') >= 0
}
|
||||
|
||||
// stripArraySuffixes removes every trailing "[]" pair from t, trimming
// whitespace after each removal so forms like "text [] []" collapse to
// "text". Input with no array suffix is returned unchanged.
func stripArraySuffixes(t string) string {
	for strings.HasSuffix(t, "[]") {
		t = strings.TrimSpace(t[:len(t)-2])
	}
	return t
}
|
||||
|
||||
// normalizeTypeToken collapses all internal runs of whitespace to a
// single space and drops leading/trailing whitespace, so multi-word
// types compare consistently (e.g. "timestamp  with time zone").
func normalizeTypeToken(t string) string {
	fields := strings.Fields(t)
	return strings.Join(fields, " ")
}
|
||||
99
pkg/pgsql/types_registry_test.go
Normal file
99
pkg/pgsql/types_registry_test.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package pgsql
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestPostgresTypeRegistry_MasterListIncludesRequestedTypes verifies that
// the full type list (with array variants enabled) contains a sample of
// required base types and their "[]" forms.
func TestPostgresTypeRegistry_MasterListIncludesRequestedTypes(t *testing.T) {
	required := []string{
		"vector",
		"integer",
		"citext",
	}

	// Index the master list for O(1) membership checks.
	types := make(map[string]bool)
	for _, typ := range GetPostgresTypes(true) {
		types[typ] = true
	}

	for _, typ := range required {
		if !types[typ] {
			t.Fatalf("master type list missing %q", typ)
		}
		if !types[typ+"[]"] {
			t.Fatalf("master type list missing array variant %q", typ+"[]")
		}
	}
}
|
||||
|
||||
// TestPostgresTypeRegistry_TypeParsingAndCapabilities is a table-driven
// test covering base-type extraction, alias canonicalization, array
// detection, registry membership, and length/precision capability flags
// for representative type strings.
func TestPostgresTypeRegistry_TypeParsingAndCapabilities(t *testing.T) {
	tests := []struct {
		input             string
		wantBase          string
		wantCanonicalBase string
		wantArray         bool
		wantKnown         bool
		wantLength        bool
		wantPrecision     bool
	}{
		{
			input:             "integer[]",
			wantBase:          "integer",
			wantCanonicalBase: "integer",
			wantArray:         true,
			wantKnown:         true,
		},
		{
			input:             "citext[]",
			wantBase:          "citext",
			wantCanonicalBase: "citext",
			wantArray:         true,
			wantKnown:         true,
		},
		{
			input:             "vector(1536)",
			wantBase:          "vector",
			wantCanonicalBase: "vector",
			wantKnown:         true,
			wantLength:        true,
		},
		{
			input:             "numeric(10,2)",
			wantBase:          "numeric",
			wantCanonicalBase: "numeric",
			wantKnown:         true,
			wantPrecision:     true,
		},
		{
			input:             "int4",
			wantBase:          "int4",
			wantCanonicalBase: "integer",
			wantKnown:         true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.input, func(t *testing.T) {
			base := ExtractBaseTypeLower(tt.input)
			if base != tt.wantBase {
				t.Fatalf("ExtractBaseTypeLower(%q) = %q, want %q", tt.input, base, tt.wantBase)
			}

			canonical := CanonicalizeBaseType(base)
			if canonical != tt.wantCanonicalBase {
				t.Fatalf("CanonicalizeBaseType(%q) = %q, want %q", base, canonical, tt.wantCanonicalBase)
			}

			if IsArrayType(tt.input) != tt.wantArray {
				t.Fatalf("IsArrayType(%q) = %v, want %v", tt.input, IsArrayType(tt.input), tt.wantArray)
			}
			if IsKnownPostgresType(tt.input) != tt.wantKnown {
				t.Fatalf("IsKnownPostgresType(%q) = %v, want %v", tt.input, IsKnownPostgresType(tt.input), tt.wantKnown)
			}
			if SupportsLength(tt.input) != tt.wantLength {
				t.Fatalf("SupportsLength(%q) = %v, want %v", tt.input, SupportsLength(tt.input), tt.wantLength)
			}
			if SupportsPrecision(tt.input) != tt.wantPrecision {
				t.Fatalf("SupportsPrecision(%q) = %v, want %v", tt.input, SupportsPrecision(tt.input), tt.wantPrecision)
			}
		})
	}
}
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||
)
|
||||
|
||||
@@ -700,16 +701,21 @@ func (r *Reader) extractBunTag(tag string) string {
|
||||
// parseTypeWithLength parses a type string and extracts length if present
|
||||
// e.g., "varchar(255)" returns ("varchar", 255)
|
||||
func (r *Reader) parseTypeWithLength(typeStr string) (baseType string, length int) {
|
||||
typeStr = strings.TrimSpace(typeStr)
|
||||
baseType = typeStr
|
||||
|
||||
// Check for type with length: varchar(255), char(10), etc.
|
||||
re := regexp.MustCompile(`^([a-zA-Z\s]+)\((\d+)\)$`)
|
||||
matches := re.FindStringSubmatch(typeStr)
|
||||
if len(matches) == 3 {
|
||||
if _, err := fmt.Sscanf(matches[2], "%d", &length); err == nil {
|
||||
baseType = strings.TrimSpace(matches[1])
|
||||
return
|
||||
rawBaseType := strings.TrimSpace(matches[1])
|
||||
if pgsql.SupportsLength(rawBaseType) {
|
||||
if _, err := fmt.Sscanf(matches[2], "%d", &length); err == nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
baseType = typeStr
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -71,8 +71,11 @@ func TestReader_ReadDatabase_Simple(t *testing.T) {
|
||||
if !emailCol.NotNull {
|
||||
t.Error("Column 'email' should be NOT NULL (explicit 'notnull' tag)")
|
||||
}
|
||||
if emailCol.Type != "varchar" || emailCol.Length != 255 {
|
||||
t.Errorf("Expected email type 'varchar(255)', got '%s' with length %d", emailCol.Type, emailCol.Length)
|
||||
if emailCol.Type != "varchar" && emailCol.Type != "varchar(255)" {
|
||||
t.Errorf("Expected email type 'varchar' or 'varchar(255)', got '%s' with length %d", emailCol.Type, emailCol.Length)
|
||||
}
|
||||
if emailCol.Length != 255 {
|
||||
t.Errorf("Expected email length 255, got %d", emailCol.Length)
|
||||
}
|
||||
|
||||
// Verify name column - primitive string type should be NOT NULL by default in Bun
|
||||
@@ -356,6 +359,33 @@ func TestReader_ReadDatabase_Complex(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseTypeWithLength_PreservesExplicitTypeModifiers(t *testing.T) {
|
||||
reader := &Reader{}
|
||||
|
||||
tests := []struct {
|
||||
input string
|
||||
wantType string
|
||||
wantLength int
|
||||
}{
|
||||
{"varchar(255)", "varchar(255)", 255},
|
||||
{"character varying(120)", "character varying(120)", 120},
|
||||
{"vector(1536)", "vector(1536)", 1536},
|
||||
{"numeric(10,2)", "numeric(10,2)", 0},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.input, func(t *testing.T) {
|
||||
gotType, gotLength := reader.parseTypeWithLength(tt.input)
|
||||
if gotType != tt.wantType {
|
||||
t.Fatalf("parseTypeWithLength(%q) type = %q, want %q", tt.input, gotType, tt.wantType)
|
||||
}
|
||||
if gotLength != tt.wantLength {
|
||||
t.Fatalf("parseTypeWithLength(%q) length = %d, want %d", tt.input, gotLength, tt.wantLength)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReader_ReadSchema(t *testing.T) {
|
||||
opts := &readers.ReaderOptions{
|
||||
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "bun", "simple.go"),
|
||||
@@ -485,9 +515,9 @@ func TestReader_NullableTypes(t *testing.T) {
|
||||
|
||||
// Test all nullability scenarios
|
||||
tests := []struct {
|
||||
column string
|
||||
notNull bool
|
||||
reason string
|
||||
column string
|
||||
notNull bool
|
||||
reason string
|
||||
}{
|
||||
{"id", true, "primary key"},
|
||||
{"user_id", true, "explicit notnull tag"},
|
||||
|
||||
@@ -567,110 +567,182 @@ func (r *Reader) parseDBML(content string) (*models.Database, error) {
|
||||
// parseColumn parses a DBML column definition
|
||||
func (r *Reader) parseColumn(line, tableName, schemaName string) (*models.Column, *models.Constraint) {
|
||||
// Format: column_name type [attributes] // comment
|
||||
parts := strings.Fields(line)
|
||||
if len(parts) < 2 {
|
||||
lineNoComment, inlineComment := splitInlineComment(line)
|
||||
signature, attrs := splitColumnSignatureAndAttrs(lineNoComment)
|
||||
columnName, columnType, ok := parseColumnSignature(signature)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
columnName := stripQuotes(parts[0])
|
||||
columnType := stripQuotes(parts[1])
|
||||
|
||||
column := models.InitColumn(columnName, tableName, schemaName)
|
||||
column.Type = columnType
|
||||
|
||||
var constraint *models.Constraint
|
||||
|
||||
// Parse attributes in brackets
|
||||
if strings.Contains(line, "[") && strings.Contains(line, "]") {
|
||||
attrStart := strings.Index(line, "[")
|
||||
attrEnd := strings.Index(line, "]")
|
||||
if attrStart < attrEnd {
|
||||
attrs := line[attrStart+1 : attrEnd]
|
||||
attrList := strings.Split(attrs, ",")
|
||||
if attrs != "" {
|
||||
attrList := strings.Split(attrs, ",")
|
||||
|
||||
for _, attr := range attrList {
|
||||
attr = strings.TrimSpace(attr)
|
||||
for _, attr := range attrList {
|
||||
attr = strings.TrimSpace(attr)
|
||||
|
||||
if strings.Contains(attr, "primary key") || attr == "pk" {
|
||||
column.IsPrimaryKey = true
|
||||
column.NotNull = true
|
||||
} else if strings.Contains(attr, "not null") {
|
||||
column.NotNull = true
|
||||
} else if attr == "increment" {
|
||||
column.AutoIncrement = true
|
||||
} else if strings.HasPrefix(attr, "default:") {
|
||||
defaultVal := strings.TrimSpace(strings.TrimPrefix(attr, "default:"))
|
||||
column.Default = strings.Trim(defaultVal, "'\"")
|
||||
} else if attr == "unique" {
|
||||
// Create a unique constraint
|
||||
// Clean table name by removing leading underscores to avoid double underscores
|
||||
cleanTableName := strings.TrimLeft(tableName, "_")
|
||||
uniqueConstraint := models.InitConstraint(
|
||||
fmt.Sprintf("ukey_%s_%s", cleanTableName, columnName),
|
||||
models.UniqueConstraint,
|
||||
)
|
||||
uniqueConstraint.Schema = schemaName
|
||||
uniqueConstraint.Table = tableName
|
||||
uniqueConstraint.Columns = []string{columnName}
|
||||
// Store it to be added later
|
||||
if constraint == nil {
|
||||
constraint = uniqueConstraint
|
||||
}
|
||||
} else if strings.HasPrefix(attr, "note:") {
|
||||
// Parse column note/comment
|
||||
note := strings.TrimSpace(strings.TrimPrefix(attr, "note:"))
|
||||
column.Comment = strings.Trim(note, "'\"")
|
||||
} else if strings.HasPrefix(attr, "ref:") {
|
||||
// Parse inline reference
|
||||
// DBML semantics depend on context:
|
||||
// - On FK column: ref: < target means "this FK references target"
|
||||
// - On PK column: ref: < source means "source references this PK" (reverse notation)
|
||||
refStr := strings.TrimSpace(strings.TrimPrefix(attr, "ref:"))
|
||||
|
||||
// Check relationship direction operator
|
||||
refOp := strings.TrimSpace(refStr)
|
||||
var isReverse bool
|
||||
if strings.HasPrefix(refOp, "<") {
|
||||
// < means "is referenced by" - only makes sense on PK columns
|
||||
isReverse = column.IsPrimaryKey
|
||||
}
|
||||
// > means "references" - always a forward FK, never reverse
|
||||
|
||||
constraint = r.parseRef(refStr)
|
||||
if constraint != nil {
|
||||
if isReverse {
|
||||
// Reverse: parsed ref is SOURCE, current column is TARGET
|
||||
// Constraint should be ON the source table
|
||||
constraint.Schema = constraint.ReferencedSchema
|
||||
constraint.Table = constraint.ReferencedTable
|
||||
constraint.Columns = constraint.ReferencedColumns
|
||||
constraint.ReferencedSchema = schemaName
|
||||
constraint.ReferencedTable = tableName
|
||||
constraint.ReferencedColumns = []string{columnName}
|
||||
} else {
|
||||
// Forward: current column is SOURCE, parsed ref is TARGET
|
||||
// Standard FK: constraint is ON current table
|
||||
constraint.Schema = schemaName
|
||||
constraint.Table = tableName
|
||||
constraint.Columns = []string{columnName}
|
||||
}
|
||||
// Generate constraint name based on table and columns
|
||||
constraint.Name = fmt.Sprintf("fk_%s_%s", constraint.Table, strings.Join(constraint.Columns, "_"))
|
||||
if strings.Contains(attr, "primary key") || attr == "pk" {
|
||||
column.IsPrimaryKey = true
|
||||
column.NotNull = true
|
||||
} else if strings.Contains(attr, "not null") {
|
||||
column.NotNull = true
|
||||
} else if attr == "increment" {
|
||||
column.AutoIncrement = true
|
||||
} else if strings.HasPrefix(attr, "default:") {
|
||||
defaultVal := strings.TrimSpace(strings.TrimPrefix(attr, "default:"))
|
||||
column.Default = strings.Trim(defaultVal, "'\"")
|
||||
} else if attr == "unique" {
|
||||
// Create a unique constraint
|
||||
// Clean table name by removing leading underscores to avoid double underscores
|
||||
cleanTableName := strings.TrimLeft(tableName, "_")
|
||||
uniqueConstraint := models.InitConstraint(
|
||||
fmt.Sprintf("ukey_%s_%s", cleanTableName, columnName),
|
||||
models.UniqueConstraint,
|
||||
)
|
||||
uniqueConstraint.Schema = schemaName
|
||||
uniqueConstraint.Table = tableName
|
||||
uniqueConstraint.Columns = []string{columnName}
|
||||
// Store it to be added later
|
||||
if constraint == nil {
|
||||
constraint = uniqueConstraint
|
||||
}
|
||||
} else if strings.HasPrefix(attr, "note:") {
|
||||
// Parse column note/comment
|
||||
note := strings.TrimSpace(strings.TrimPrefix(attr, "note:"))
|
||||
column.Comment = strings.Trim(note, "'\"")
|
||||
} else if strings.HasPrefix(attr, "ref:") {
|
||||
// Parse inline reference
|
||||
// DBML semantics depend on context:
|
||||
// - On FK column: ref: < target means "this FK references target"
|
||||
// - On PK column: ref: < source means "source references this PK" (reverse notation)
|
||||
refStr := strings.TrimSpace(strings.TrimPrefix(attr, "ref:"))
|
||||
|
||||
// Check relationship direction operator
|
||||
refOp := strings.TrimSpace(refStr)
|
||||
var isReverse bool
|
||||
if strings.HasPrefix(refOp, "<") {
|
||||
// < means "is referenced by" - only makes sense on PK columns
|
||||
isReverse = column.IsPrimaryKey
|
||||
}
|
||||
// > means "references" - always a forward FK, never reverse
|
||||
|
||||
constraint = r.parseRef(refStr)
|
||||
if constraint != nil {
|
||||
if isReverse {
|
||||
// Reverse: parsed ref is SOURCE, current column is TARGET
|
||||
// Constraint should be ON the source table
|
||||
constraint.Schema = constraint.ReferencedSchema
|
||||
constraint.Table = constraint.ReferencedTable
|
||||
constraint.Columns = constraint.ReferencedColumns
|
||||
constraint.ReferencedSchema = schemaName
|
||||
constraint.ReferencedTable = tableName
|
||||
constraint.ReferencedColumns = []string{columnName}
|
||||
} else {
|
||||
// Forward: current column is SOURCE, parsed ref is TARGET
|
||||
// Standard FK: constraint is ON current table
|
||||
constraint.Schema = schemaName
|
||||
constraint.Table = tableName
|
||||
constraint.Columns = []string{columnName}
|
||||
}
|
||||
// Generate constraint name based on table and columns
|
||||
constraint.Name = fmt.Sprintf("fk_%s_%s", constraint.Table, strings.Join(constraint.Columns, "_"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parse inline comment
|
||||
if strings.Contains(line, "//") {
|
||||
commentStart := strings.Index(line, "//")
|
||||
column.Comment = strings.TrimSpace(line[commentStart+2:])
|
||||
if inlineComment != "" {
|
||||
column.Comment = inlineComment
|
||||
}
|
||||
|
||||
return column, constraint
|
||||
}
|
||||
|
||||
// splitInlineComment splits a DBML line into its content and any trailing
// "//" comment, with surrounding whitespace trimmed from both parts.
//
// Fix: a "//" sequence inside a single- or double-quoted string (e.g. a
// default value such as 'http://example.com') is no longer treated as a
// comment marker; the previous implementation split at the first "//"
// anywhere in the line. When no comment is present, the line is returned
// unchanged with an empty comment.
func splitInlineComment(line string) (string, string) {
	var quote byte
	for i := 0; i+1 < len(line); i++ {
		c := line[i]
		if quote != 0 {
			// Inside a quoted region: only the matching quote ends it.
			if c == quote {
				quote = 0
			}
			continue
		}
		switch c {
		case '\'', '"':
			quote = c
		case '/':
			if line[i+1] == '/' {
				return strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+2:])
			}
		}
	}
	return line, ""
}
|
||||
|
||||
// splitColumnSignatureAndAttrs separates a DBML column line (with any
// inline comment already removed) into the "name type" signature and the
// contents of a trailing [ ... ] attribute block.
//
// The attribute block is located by scanning backwards with a bracket
// depth counter, so bracket pairs nested inside the attributes do not
// confuse the split. If no trailing attribute block is found, the whole
// trimmed line is returned with an empty attribute string.
func splitColumnSignatureAndAttrs(line string) (string, string) {
	trimmed := strings.TrimSpace(line)
	if trimmed == "" || !strings.HasSuffix(trimmed, "]") {
		return trimmed, ""
	}

	bracketDepth := 0
	for i := len(trimmed) - 1; i >= 0; i-- {
		switch trimmed[i] {
		case ']':
			bracketDepth++
		case '[':
			bracketDepth--
			if bracketDepth == 0 {
				// DBML attributes are a trailing [ ... ] block preceded by whitespace.
				// This avoids confusing array types like text[] with attribute blocks.
				if i > 0 && (trimmed[i-1] == ' ' || trimmed[i-1] == '\t') {
					return strings.TrimSpace(trimmed[:i]), strings.TrimSpace(trimmed[i+1 : len(trimmed)-1])
				}
			}
		}
	}

	return trimmed, ""
}
|
||||
|
||||
// parseColumnSignature splits a "name type" column signature into the
// column name and its type string. The name may be wrapped in single or
// double quotes (quotes are stripped); everything after the name is the
// type, which may contain spaces (e.g. "timestamp with time zone").
// The boolean result is false when the signature does not contain both a
// non-empty name and a non-empty type.
func parseColumnSignature(signature string) (string, string, bool) {
	signature = strings.TrimSpace(signature)
	if signature == "" {
		return "", "", false
	}

	// Find the end of the column name: either the character after the
	// closing quote of a quoted name, or the first whitespace of an
	// unquoted one.
	var splitAt int
	if signature[0] == '"' || signature[0] == '\'' {
		quote := signature[0]
		splitAt = 1
		for splitAt < len(signature) {
			if signature[splitAt] == quote {
				splitAt++
				break
			}
			splitAt++
		}
	} else {
		for splitAt < len(signature) && signature[splitAt] != ' ' && signature[splitAt] != '\t' {
			splitAt++
		}
	}

	// A name consuming the whole signature leaves no room for a type.
	if splitAt <= 0 || splitAt >= len(signature) {
		return "", "", false
	}

	columnName := stripQuotes(strings.TrimSpace(signature[:splitAt]))
	columnType := stripWrappingQuotes(strings.TrimSpace(signature[splitAt:]))
	if columnName == "" || columnType == "" {
		return "", "", false
	}

	return columnName, columnType, true
}
|
||||
|
||||
// stripWrappingQuotes trims whitespace and removes one matching pair of
// surrounding quotes (both single or both double). Mismatched or absent
// quotes leave the trimmed string unchanged.
func stripWrappingQuotes(s string) string {
	s = strings.TrimSpace(s)
	if len(s) < 2 {
		return s
	}
	first, last := s[0], s[len(s)-1]
	if first == last && (first == '"' || first == '\'') {
		return s[1 : len(s)-1]
	}
	return s
}
|
||||
|
||||
// parseIndex parses a DBML index definition
|
||||
func (r *Reader) parseIndex(line, tableName, schemaName string) *models.Index {
|
||||
// Format: (columns) [attributes] OR columnname [attributes]
|
||||
@@ -832,7 +904,11 @@ func (r *Reader) parseRef(refStr string) *models.Constraint {
|
||||
for _, action := range actionList {
|
||||
action = strings.TrimSpace(action)
|
||||
|
||||
if strings.HasPrefix(action, "ondelete:") {
|
||||
if strings.HasPrefix(action, "delete:") {
|
||||
constraint.OnDelete = strings.TrimSpace(strings.TrimPrefix(action, "delete:"))
|
||||
} else if strings.HasPrefix(action, "update:") {
|
||||
constraint.OnUpdate = strings.TrimSpace(strings.TrimPrefix(action, "update:"))
|
||||
} else if strings.HasPrefix(action, "ondelete:") {
|
||||
constraint.OnDelete = strings.TrimSpace(strings.TrimPrefix(action, "ondelete:"))
|
||||
} else if strings.HasPrefix(action, "onupdate:") {
|
||||
constraint.OnUpdate = strings.TrimSpace(strings.TrimPrefix(action, "onupdate:"))
|
||||
|
||||
@@ -839,6 +839,67 @@ func TestConstraintNaming(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseColumn_PostgresTypes(t *testing.T) {
|
||||
reader := &Reader{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
line string
|
||||
wantName string
|
||||
wantType string
|
||||
wantNotNull bool
|
||||
wantComment string
|
||||
}{
|
||||
{
|
||||
name: "array type with attrs",
|
||||
line: "tags text[] [not null]",
|
||||
wantName: "tags",
|
||||
wantType: "text[]",
|
||||
wantNotNull: true,
|
||||
},
|
||||
{
|
||||
name: "vector with dimension",
|
||||
line: "embedding vector(1536)",
|
||||
wantName: "embedding",
|
||||
wantType: "vector(1536)",
|
||||
},
|
||||
{
|
||||
name: "multi word timestamp type",
|
||||
line: "published_at timestamp with time zone",
|
||||
wantName: "published_at",
|
||||
wantType: "timestamp with time zone",
|
||||
},
|
||||
{
|
||||
name: "array type with inline comment",
|
||||
line: "labels varchar(20)[] // column labels",
|
||||
wantName: "labels",
|
||||
wantType: "varchar(20)[]",
|
||||
wantComment: "column labels",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
col, _ := reader.parseColumn(tt.line, "events", "public")
|
||||
if col == nil {
|
||||
t.Fatalf("parseColumn() returned nil column")
|
||||
}
|
||||
if col.Name != tt.wantName {
|
||||
t.Errorf("column name = %q, want %q", col.Name, tt.wantName)
|
||||
}
|
||||
if col.Type != tt.wantType {
|
||||
t.Errorf("column type = %q, want %q", col.Type, tt.wantType)
|
||||
}
|
||||
if col.NotNull != tt.wantNotNull {
|
||||
t.Errorf("column not null = %v, want %v", col.NotNull, tt.wantNotNull)
|
||||
}
|
||||
if col.Comment != tt.wantComment {
|
||||
t.Errorf("column comment = %q, want %q", col.Comment, tt.wantComment)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func getKeys[V any](m map[string]V) []string {
|
||||
keys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||
)
|
||||
|
||||
@@ -232,7 +233,19 @@ func (r *Reader) convertField(dctxField *models.DCTXField, tableName string) ([]
|
||||
|
||||
// mapDataType maps Clarion data types to SQL types
|
||||
func (r *Reader) mapDataType(clarionType string, size int) (sqlType string, precision int) {
|
||||
switch strings.ToUpper(clarionType) {
|
||||
trimmedType := strings.TrimSpace(clarionType)
|
||||
|
||||
// Preserve known PostgreSQL types (including arrays and extension types)
|
||||
// from DCTX input instead of coercing them to generic text.
|
||||
if pgsql.IsKnownPostgresType(trimmedType) {
|
||||
pgType := canonicalizePostgresType(trimmedType)
|
||||
if !pgsql.HasExplicitTypeModifier(pgType) && size > 0 && pgsql.SupportsLength(pgType) {
|
||||
return pgType, size
|
||||
}
|
||||
return pgType, 0
|
||||
}
|
||||
|
||||
switch strings.ToUpper(trimmedType) {
|
||||
case "LONG":
|
||||
if size == 8 {
|
||||
return "bigint", 0
|
||||
@@ -306,6 +319,32 @@ func (r *Reader) mapDataType(clarionType string, size int) (sqlType string, prec
|
||||
}
|
||||
}
|
||||
|
||||
// canonicalizePostgresType normalizes a PostgreSQL type string to its
// canonical spelling while preserving any "(...)" modifier and array
// suffixes, e.g. "INT4[]" -> "integer[]", "Vector(1536)" -> "vector(1536)".
func canonicalizePostgresType(typeStr string) string {
	// Collapse whitespace and lowercase so suffix checks and the alias
	// lookup below see a uniform token.
	t := strings.ToLower(strings.Join(strings.Fields(strings.TrimSpace(typeStr)), " "))
	if t == "" {
		return ""
	}

	// Handle array suffixes: peel off trailing [] pairs, counting them so
	// they can be re-applied after the base type is canonicalized.
	arrayCount := 0
	for strings.HasSuffix(t, "[]") {
		arrayCount++
		t = strings.TrimSpace(strings.TrimSuffix(t, "[]"))
	}

	// Handle optional type modifier: split off a trailing "(...)" so the
	// alias lookup sees only the bare type name.
	modifier := ""
	if idx := strings.Index(t, "("); idx > 0 {
		if end := strings.LastIndex(t, ")"); end > idx {
			modifier = t[idx : end+1]
			t = strings.TrimSpace(t[:idx])
		}
	}

	base := pgsql.CanonicalizeBaseType(t)
	return base + modifier + strings.Repeat("[]", arrayCount)
}
|
||||
|
||||
// processKeys processes DCTX keys and converts them to indexes and primary keys
|
||||
func (r *Reader) processKeys(dctxTable *models.DCTXTable, table *models.Table, fieldGuidMap map[string]string) error {
|
||||
for _, dctxKey := range dctxTable.Keys {
|
||||
|
||||
@@ -493,3 +493,55 @@ func TestRelationships(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMapDataType_PostgresTypes(t *testing.T) {
|
||||
reader := &Reader{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
inputType string
|
||||
size int
|
||||
wantType string
|
||||
wantLength int
|
||||
}{
|
||||
{
|
||||
name: "integer array preserved",
|
||||
inputType: "integer[]",
|
||||
wantType: "integer[]",
|
||||
},
|
||||
{
|
||||
name: "citext array preserved",
|
||||
inputType: "citext[]",
|
||||
wantType: "citext[]",
|
||||
},
|
||||
{
|
||||
name: "vector modifier preserved",
|
||||
inputType: "vector(1536)",
|
||||
wantType: "vector(1536)",
|
||||
},
|
||||
{
|
||||
name: "alias canonicalized in array",
|
||||
inputType: "int4[]",
|
||||
wantType: "integer[]",
|
||||
},
|
||||
{
|
||||
name: "varchar length from size",
|
||||
inputType: "varchar",
|
||||
size: 120,
|
||||
wantType: "varchar",
|
||||
wantLength: 120,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotType, gotLength := reader.mapDataType(tt.inputType, tt.size)
|
||||
if gotType != tt.wantType {
|
||||
t.Fatalf("mapDataType(%q, %d) type = %q, want %q", tt.inputType, tt.size, gotType, tt.wantType)
|
||||
}
|
||||
if gotLength != tt.wantLength {
|
||||
t.Fatalf("mapDataType(%q, %d) length = %d, want %d", tt.inputType, tt.size, gotLength, tt.wantLength)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
53
pkg/readers/doc.go
Normal file
53
pkg/readers/doc.go
Normal file
@@ -0,0 +1,53 @@
|
||||
// Package readers provides interfaces and implementations for reading database schemas
|
||||
// from various input formats and data sources.
|
||||
//
|
||||
// # Overview
|
||||
//
|
||||
// The readers package defines a common Reader interface that all format-specific readers
|
||||
// implement. This allows RelSpec to read database schemas from multiple sources including:
|
||||
// - Live databases (PostgreSQL, SQLite)
|
||||
// - Schema definition files (DBML, DCTX, DrawDB, GraphQL)
|
||||
// - ORM model files (GORM, Bun, Drizzle, Prisma, TypeORM)
|
||||
// - Data interchange formats (JSON, YAML)
|
||||
//
|
||||
// # Architecture
|
||||
//
|
||||
// Each reader implementation is located in its own subpackage (e.g., pkg/readers/dbml,
|
||||
// pkg/readers/pgsql) and implements the Reader interface, supporting three levels of
|
||||
// granularity:
|
||||
// - ReadDatabase() - Read complete database with all schemas
|
||||
// - ReadSchema() - Read single schema with all tables
|
||||
// - ReadTable() - Read single table with all columns and metadata
|
||||
//
|
||||
// # Usage
|
||||
//
|
||||
// Readers are instantiated with ReaderOptions containing source-specific configuration:
|
||||
//
|
||||
// // Read from file
|
||||
// reader := dbml.NewReader(&readers.ReaderOptions{
|
||||
// FilePath: "schema.dbml",
|
||||
// })
|
||||
// db, err := reader.ReadDatabase()
|
||||
//
|
||||
// // Read from database
|
||||
// reader := pgsql.NewReader(&readers.ReaderOptions{
|
||||
// ConnectionString: "postgres://user:pass@localhost/mydb",
|
||||
// })
|
||||
// db, err := reader.ReadDatabase()
|
||||
//
|
||||
// # Supported Formats
|
||||
//
|
||||
// - dbml: Database Markup Language files
|
||||
// - dctx: DCTX schema files
|
||||
// - drawdb: DrawDB JSON format
|
||||
// - graphql: GraphQL schema definition language
|
||||
// - json: JSON database schema
|
||||
// - yaml: YAML database schema
|
||||
// - gorm: Go GORM model structs
|
||||
// - bun: Go Bun model structs
|
||||
// - drizzle: TypeScript Drizzle ORM schemas
|
||||
// - prisma: Prisma schema language
|
||||
// - typeorm: TypeScript TypeORM entities
|
||||
// - pgsql: PostgreSQL live database introspection
|
||||
// - sqlite: SQLite database files
|
||||
package readers
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/writers/drawdb"
|
||||
)
|
||||
@@ -231,30 +232,35 @@ func (r *Reader) convertToColumn(field *drawdb.DrawDBField, tableName, schemaNam
|
||||
|
||||
// Parse type and dimensions
|
||||
typeStr := field.Type
|
||||
typeStr = strings.TrimSpace(typeStr)
|
||||
column.Type = typeStr
|
||||
|
||||
// Try to extract length/precision from type string like "varchar(255)" or "decimal(10,2)"
|
||||
if strings.Contains(typeStr, "(") {
|
||||
parts := strings.Split(typeStr, "(")
|
||||
column.Type = parts[0]
|
||||
baseType := strings.TrimSpace(parts[0])
|
||||
|
||||
if len(parts) > 1 {
|
||||
dimensions := strings.TrimSuffix(parts[1], ")")
|
||||
if strings.Contains(dimensions, ",") {
|
||||
// Precision and scale (e.g., decimal(10,2))
|
||||
dims := strings.Split(dimensions, ",")
|
||||
if precision, err := strconv.Atoi(strings.TrimSpace(dims[0])); err == nil {
|
||||
column.Precision = precision
|
||||
}
|
||||
if len(dims) > 1 {
|
||||
if scale, err := strconv.Atoi(strings.TrimSpace(dims[1])); err == nil {
|
||||
column.Scale = scale
|
||||
// Precision and scale (e.g., decimal(10,2), numeric(10,2))
|
||||
if pgsql.SupportsPrecision(baseType) {
|
||||
dims := strings.Split(dimensions, ",")
|
||||
if precision, err := strconv.Atoi(strings.TrimSpace(dims[0])); err == nil {
|
||||
column.Precision = precision
|
||||
}
|
||||
if len(dims) > 1 {
|
||||
if scale, err := strconv.Atoi(strings.TrimSpace(dims[1])); err == nil {
|
||||
column.Scale = scale
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Just length (e.g., varchar(255))
|
||||
if length, err := strconv.Atoi(dimensions); err == nil {
|
||||
column.Length = length
|
||||
if pgsql.SupportsLength(baseType) {
|
||||
if length, err := strconv.Atoi(dimensions); err == nil {
|
||||
column.Length = length
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/writers/drawdb"
|
||||
)
|
||||
|
||||
func TestReader_ReadDatabase_Simple(t *testing.T) {
|
||||
@@ -288,6 +289,61 @@ func TestReader_ReadDatabase_Complex(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertToColumn_PreservesExplicitTypeModifiers(t *testing.T) {
|
||||
reader := &Reader{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fieldType string
|
||||
wantType string
|
||||
wantLength int
|
||||
wantPrecision int
|
||||
wantScale int
|
||||
}{
|
||||
{
|
||||
name: "varchar with length",
|
||||
fieldType: "varchar(255)",
|
||||
wantType: "varchar(255)",
|
||||
wantLength: 255,
|
||||
},
|
||||
{
|
||||
name: "numeric precision/scale",
|
||||
fieldType: "numeric(10,2)",
|
||||
wantType: "numeric(10,2)",
|
||||
wantPrecision: 10,
|
||||
wantScale: 2,
|
||||
},
|
||||
{
|
||||
name: "custom vector modifier",
|
||||
fieldType: "vector(1536)",
|
||||
wantType: "vector(1536)",
|
||||
wantLength: 1536,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
field := &drawdb.DrawDBField{
|
||||
Name: tt.name,
|
||||
Type: tt.fieldType,
|
||||
}
|
||||
col := reader.convertToColumn(field, "events", "public")
|
||||
if col.Type != tt.wantType {
|
||||
t.Fatalf("column type = %q, want %q", col.Type, tt.wantType)
|
||||
}
|
||||
if col.Length != tt.wantLength {
|
||||
t.Fatalf("column length = %d, want %d", col.Length, tt.wantLength)
|
||||
}
|
||||
if col.Precision != tt.wantPrecision {
|
||||
t.Fatalf("column precision = %d, want %d", col.Precision, tt.wantPrecision)
|
||||
}
|
||||
if col.Scale != tt.wantScale {
|
||||
t.Fatalf("column scale = %d, want %d", col.Scale, tt.wantScale)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReader_ReadSchema(t *testing.T) {
|
||||
opts := &readers.ReaderOptions{
|
||||
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "drawdb", "simple.json"),
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||
)
|
||||
|
||||
@@ -784,11 +785,14 @@ func (r *Reader) extractGormTag(tag string) string {
|
||||
// parseTypeWithLength parses a type string and extracts length if present
|
||||
// e.g., "varchar(255)" returns ("varchar", 255)
|
||||
func (r *Reader) parseTypeWithLength(typeStr string) (baseType string, length int) {
|
||||
typeStr = strings.TrimSpace(typeStr)
|
||||
baseType = typeStr
|
||||
|
||||
// Check for type with length: varchar(255), char(10), etc.
|
||||
// Also handle precision/scale: numeric(10,2)
|
||||
if strings.Contains(typeStr, "(") {
|
||||
idx := strings.Index(typeStr, "(")
|
||||
baseType = strings.TrimSpace(typeStr[:idx])
|
||||
rawBaseType := strings.TrimSpace(typeStr[:idx])
|
||||
|
||||
// Extract numbers from parentheses
|
||||
parens := typeStr[idx+1:]
|
||||
@@ -796,14 +800,15 @@ func (r *Reader) parseTypeWithLength(typeStr string) (baseType string, length in
|
||||
parens = parens[:endIdx]
|
||||
}
|
||||
|
||||
// For now, just handle single number (length)
|
||||
if !strings.Contains(parens, ",") {
|
||||
// Only treat as "length" for text-ish SQL types.
|
||||
// This avoids converting custom modifiers like vector(1536) into Length.
|
||||
if pgsql.SupportsLength(rawBaseType) && !strings.Contains(parens, ",") {
|
||||
if _, err := fmt.Sscanf(parens, "%d", &length); err == nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
baseType = typeStr
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -71,8 +71,11 @@ func TestReader_ReadDatabase_Simple(t *testing.T) {
|
||||
if !emailCol.NotNull {
|
||||
t.Error("Column 'email' should be NOT NULL (explicit 'not null' tag)")
|
||||
}
|
||||
if emailCol.Type != "varchar" || emailCol.Length != 255 {
|
||||
t.Errorf("Expected email type 'varchar(255)', got '%s' with length %d", emailCol.Type, emailCol.Length)
|
||||
if emailCol.Type != "varchar" && emailCol.Type != "varchar(255)" {
|
||||
t.Errorf("Expected email type 'varchar' or 'varchar(255)', got '%s' with length %d", emailCol.Type, emailCol.Length)
|
||||
}
|
||||
if emailCol.Length != 255 {
|
||||
t.Errorf("Expected email length 255, got %d", emailCol.Length)
|
||||
}
|
||||
|
||||
// Verify name column - primitive string type should be NOT NULL by default
|
||||
@@ -363,6 +366,33 @@ func TestReader_ReadDatabase_Complex(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseTypeWithLength_PreservesExplicitTypeModifiers(t *testing.T) {
|
||||
reader := &Reader{}
|
||||
|
||||
tests := []struct {
|
||||
input string
|
||||
wantType string
|
||||
wantLength int
|
||||
}{
|
||||
{"varchar(255)", "varchar(255)", 255},
|
||||
{"character varying(120)", "character varying(120)", 120},
|
||||
{"vector(1536)", "vector(1536)", 1536},
|
||||
{"numeric(10,2)", "numeric(10,2)", 0},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.input, func(t *testing.T) {
|
||||
gotType, gotLength := reader.parseTypeWithLength(tt.input)
|
||||
if gotType != tt.wantType {
|
||||
t.Fatalf("parseTypeWithLength(%q) type = %q, want %q", tt.input, gotType, tt.wantType)
|
||||
}
|
||||
if gotLength != tt.wantLength {
|
||||
t.Fatalf("parseTypeWithLength(%q) length = %d, want %d", tt.input, gotLength, tt.wantLength)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReader_ReadSchema(t *testing.T) {
|
||||
opts := &readers.ReaderOptions{
|
||||
FilePath: filepath.Join("..", "..", "..", "tests", "assets", "gorm", "simple.go"),
|
||||
|
||||
91
pkg/readers/mssql/README.md
Normal file
91
pkg/readers/mssql/README.md
Normal file
@@ -0,0 +1,91 @@
|
||||
# MSSQL Reader
|
||||
|
||||
Reads database schema from Microsoft SQL Server databases using a live connection.
|
||||
|
||||
## Features
|
||||
|
||||
- **Live Connection**: Connects to MSSQL databases using the Microsoft ODBC driver
|
||||
- **Multi-Schema Support**: Reads multiple schemas with full support for user-defined schemas
|
||||
- **Comprehensive Metadata**: Reads tables, columns, constraints, indexes, and extended properties
|
||||
- **Type Mapping**: Converts MSSQL types to canonical types for cross-database compatibility
|
||||
- **Extended Properties**: Extracts table and column descriptions from MS_Description
|
||||
- **Identity Columns**: Maps IDENTITY columns to AutoIncrement
|
||||
- **Relationships**: Derives relationships from foreign key constraints
|
||||
|
||||
## Connection String Format
|
||||
|
||||
```
|
||||
sqlserver://[user[:password]@][host][:port][/database][?query]
|
||||
```
|
||||
|
||||
Examples:
|
||||
```
|
||||
sqlserver://sa:password@localhost/dbname
|
||||
sqlserver://user:pass@192.168.1.100:1433/production
|
||||
sqlserver://localhost/testdb?encrypt=disable
|
||||
```
|
||||
|
||||
## Supported Constraints
|
||||
|
||||
- Primary Keys
|
||||
- Foreign Keys (with ON DELETE and ON UPDATE actions)
|
||||
- Unique Constraints
|
||||
- Check Constraints
|
||||
|
||||
## Type Mappings
|
||||
|
||||
| MSSQL Type | Canonical Type |
|
||||
|------------|----------------|
|
||||
| INT | int |
|
||||
| BIGINT | int64 |
|
||||
| SMALLINT | int16 |
|
||||
| TINYINT | int8 |
|
||||
| BIT | bool |
|
||||
| REAL | float32 |
|
||||
| FLOAT | float64 |
|
||||
| NUMERIC, DECIMAL | decimal |
|
||||
| NVARCHAR, VARCHAR | string |
|
||||
| DATETIME2 | timestamp |
|
||||
| DATETIMEOFFSET | timestamptz |
|
||||
| UNIQUEIDENTIFIER | uuid |
|
||||
| VARBINARY | bytea |
|
||||
| DATE | date |
|
||||
| TIME | time |
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
import "git.warky.dev/wdevs/relspecgo/pkg/readers/mssql"
|
||||
import "git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||
|
||||
reader := mssql.NewReader(&readers.ReaderOptions{
|
||||
ConnectionString: "sqlserver://sa:password@localhost/mydb",
|
||||
})
|
||||
|
||||
db, err := reader.ReadDatabase()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Process schema...
|
||||
for _, schema := range db.Schemas {
|
||||
fmt.Printf("Schema: %s\n", schema.Name)
|
||||
for _, table := range schema.Tables {
|
||||
fmt.Printf(" Table: %s\n", table.Name)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
Run tests with:
|
||||
```bash
|
||||
go test ./pkg/readers/mssql/...
|
||||
```
|
||||
|
||||
For integration testing with a live MSSQL database:
|
||||
```bash
|
||||
docker-compose up -d mssql
|
||||
go test -tags=integration ./pkg/readers/mssql/...
|
||||
docker-compose down
|
||||
```
|
||||
416
pkg/readers/mssql/queries.go
Normal file
416
pkg/readers/mssql/queries.go
Normal file
@@ -0,0 +1,416 @@
|
||||
package mssql
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
)
|
||||
|
||||
// querySchemas retrieves all user-defined schemas from the database
|
||||
func (r *Reader) querySchemas() ([]*models.Schema, error) {
|
||||
query := `
|
||||
SELECT s.name, ISNULL(ep.value, '') as description
|
||||
FROM sys.schemas s
|
||||
LEFT JOIN sys.extended_properties ep
|
||||
ON ep.major_id = s.schema_id
|
||||
AND ep.minor_id = 0
|
||||
AND ep.class = 3
|
||||
AND ep.name = 'MS_Description'
|
||||
WHERE s.name NOT IN ('dbo', 'guest', 'INFORMATION_SCHEMA', 'sys')
|
||||
ORDER BY s.name
|
||||
`
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
schemas := make([]*models.Schema, 0)
|
||||
for rows.Next() {
|
||||
var name, description string
|
||||
|
||||
if err := rows.Scan(&name, &description); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
schema := models.InitSchema(name)
|
||||
if description != "" {
|
||||
schema.Description = description
|
||||
}
|
||||
|
||||
schemas = append(schemas, schema)
|
||||
}
|
||||
|
||||
// Always include dbo schema if it has tables
|
||||
dboSchema := models.InitSchema("dbo")
|
||||
schemas = append(schemas, dboSchema)
|
||||
|
||||
return schemas, rows.Err()
|
||||
}
|
||||
|
||||
// queryTables retrieves all tables for a given schema
|
||||
func (r *Reader) queryTables(schemaName string) ([]*models.Table, error) {
|
||||
query := `
|
||||
SELECT t.table_schema, t.table_name, ISNULL(ep.value, '') as description
|
||||
FROM information_schema.tables t
|
||||
LEFT JOIN sys.extended_properties ep
|
||||
ON ep.major_id = OBJECT_ID(QUOTENAME(t.table_schema) + '.' + QUOTENAME(t.table_name))
|
||||
AND ep.minor_id = 0
|
||||
AND ep.class = 1
|
||||
AND ep.name = 'MS_Description'
|
||||
WHERE t.table_schema = ? AND t.table_type = 'BASE TABLE'
|
||||
ORDER BY t.table_name
|
||||
`
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query, schemaName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
tables := make([]*models.Table, 0)
|
||||
for rows.Next() {
|
||||
var schema, tableName, description string
|
||||
|
||||
if err := rows.Scan(&schema, &tableName, &description); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
table := models.InitTable(tableName, schema)
|
||||
if description != "" {
|
||||
table.Description = description
|
||||
}
|
||||
|
||||
tables = append(tables, table)
|
||||
}
|
||||
|
||||
return tables, rows.Err()
|
||||
}
|
||||
|
||||
// queryColumns retrieves all columns for tables in a schema
|
||||
// Returns map[schema.table]map[columnName]*Column
|
||||
func (r *Reader) queryColumns(schemaName string) (map[string]map[string]*models.Column, error) {
|
||||
query := `
|
||||
SELECT
|
||||
c.table_schema,
|
||||
c.table_name,
|
||||
c.column_name,
|
||||
c.ordinal_position,
|
||||
c.column_default,
|
||||
c.is_nullable,
|
||||
c.data_type,
|
||||
c.character_maximum_length,
|
||||
c.numeric_precision,
|
||||
c.numeric_scale,
|
||||
ISNULL(ep.value, '') as description,
|
||||
COLUMNPROPERTY(OBJECT_ID(QUOTENAME(c.table_schema) + '.' + QUOTENAME(c.table_name)), c.column_name, 'IsIdentity') as is_identity
|
||||
FROM information_schema.columns c
|
||||
LEFT JOIN sys.extended_properties ep
|
||||
ON ep.major_id = OBJECT_ID(QUOTENAME(c.table_schema) + '.' + QUOTENAME(c.table_name))
|
||||
AND ep.minor_id = COLUMNPROPERTY(OBJECT_ID(QUOTENAME(c.table_schema) + '.' + QUOTENAME(c.table_name)), c.column_name, 'ColumnId')
|
||||
AND ep.class = 1
|
||||
AND ep.name = 'MS_Description'
|
||||
WHERE c.table_schema = ?
|
||||
ORDER BY c.table_schema, c.table_name, c.ordinal_position
|
||||
`
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query, schemaName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
columnsMap := make(map[string]map[string]*models.Column)
|
||||
|
||||
for rows.Next() {
|
||||
var schema, tableName, columnName, isNullable, dataType, description string
|
||||
var ordinalPosition int
|
||||
var columnDefault, charMaxLength, numPrecision, numScale, isIdentity *int
|
||||
|
||||
if err := rows.Scan(&schema, &tableName, &columnName, &ordinalPosition, &columnDefault, &isNullable, &dataType, &charMaxLength, &numPrecision, &numScale, &description, &isIdentity); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
column := models.InitColumn(columnName, tableName, schema)
|
||||
column.Type = r.mapDataType(dataType)
|
||||
column.NotNull = (isNullable == "NO")
|
||||
column.Sequence = uint(ordinalPosition)
|
||||
|
||||
if description != "" {
|
||||
column.Description = description
|
||||
}
|
||||
|
||||
// Check if this is an identity column (auto-increment)
|
||||
if isIdentity != nil && *isIdentity == 1 {
|
||||
column.AutoIncrement = true
|
||||
}
|
||||
|
||||
if charMaxLength != nil && *charMaxLength > 0 {
|
||||
column.Length = *charMaxLength
|
||||
}
|
||||
|
||||
if numPrecision != nil && *numPrecision > 0 {
|
||||
column.Precision = *numPrecision
|
||||
}
|
||||
|
||||
if numScale != nil && *numScale > 0 {
|
||||
column.Scale = *numScale
|
||||
}
|
||||
|
||||
// Create table key
|
||||
tableKey := schema + "." + tableName
|
||||
if columnsMap[tableKey] == nil {
|
||||
columnsMap[tableKey] = make(map[string]*models.Column)
|
||||
}
|
||||
columnsMap[tableKey][columnName] = column
|
||||
}
|
||||
|
||||
return columnsMap, rows.Err()
|
||||
}
|
||||
|
||||
// queryPrimaryKeys retrieves all primary key constraints for a schema
|
||||
// Returns map[schema.table]*Constraint
|
||||
func (r *Reader) queryPrimaryKeys(schemaName string) (map[string]*models.Constraint, error) {
|
||||
query := `
|
||||
SELECT
|
||||
s.name as schema_name,
|
||||
t.name as table_name,
|
||||
i.name as constraint_name,
|
||||
STRING_AGG(c.name, ',') WITHIN GROUP (ORDER BY ic.key_ordinal) as columns
|
||||
FROM sys.tables t
|
||||
INNER JOIN sys.indexes i ON t.object_id = i.object_id AND i.is_primary_key = 1
|
||||
INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
|
||||
INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
|
||||
INNER JOIN sys.columns c ON t.object_id = c.object_id AND ic.column_id = c.column_id
|
||||
WHERE s.name = ?
|
||||
GROUP BY s.name, t.name, i.name
|
||||
`
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query, schemaName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
primaryKeys := make(map[string]*models.Constraint)
|
||||
|
||||
for rows.Next() {
|
||||
var schema, tableName, constraintName, columnsStr string
|
||||
|
||||
if err := rows.Scan(&schema, &tableName, &constraintName, &columnsStr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
columns := strings.Split(columnsStr, ",")
|
||||
|
||||
constraint := models.InitConstraint(constraintName, models.PrimaryKeyConstraint)
|
||||
constraint.Schema = schema
|
||||
constraint.Table = tableName
|
||||
constraint.Columns = columns
|
||||
|
||||
tableKey := schema + "." + tableName
|
||||
primaryKeys[tableKey] = constraint
|
||||
}
|
||||
|
||||
return primaryKeys, rows.Err()
|
||||
}
|
||||
|
||||
// queryForeignKeys retrieves all foreign key constraints for a schema
|
||||
// Returns map[schema.table][]*Constraint
|
||||
func (r *Reader) queryForeignKeys(schemaName string) (map[string][]*models.Constraint, error) {
|
||||
query := `
|
||||
SELECT
|
||||
s.name as schema_name,
|
||||
t.name as table_name,
|
||||
fk.name as constraint_name,
|
||||
rs.name as referenced_schema,
|
||||
rt.name as referenced_table,
|
||||
STRING_AGG(c.name, ',') WITHIN GROUP (ORDER BY fkc.constraint_column_id) as columns,
|
||||
STRING_AGG(rc.name, ',') WITHIN GROUP (ORDER BY fkc.constraint_column_id) as referenced_columns,
|
||||
fk.delete_referential_action_desc,
|
||||
fk.update_referential_action_desc
|
||||
FROM sys.foreign_keys fk
|
||||
INNER JOIN sys.tables t ON fk.parent_object_id = t.object_id
|
||||
INNER JOIN sys.tables rt ON fk.referenced_object_id = rt.object_id
|
||||
INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
|
||||
INNER JOIN sys.schemas rs ON rt.schema_id = rs.schema_id
|
||||
INNER JOIN sys.foreign_key_columns fkc ON fk.object_id = fkc.constraint_object_id
|
||||
INNER JOIN sys.columns c ON fkc.parent_object_id = c.object_id AND fkc.parent_column_id = c.column_id
|
||||
INNER JOIN sys.columns rc ON fkc.referenced_object_id = rc.object_id AND fkc.referenced_column_id = rc.column_id
|
||||
WHERE s.name = ?
|
||||
GROUP BY s.name, t.name, fk.name, rs.name, rt.name, fk.delete_referential_action_desc, fk.update_referential_action_desc
|
||||
`
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query, schemaName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
foreignKeys := make(map[string][]*models.Constraint)
|
||||
|
||||
for rows.Next() {
|
||||
var schema, tableName, constraintName, refSchema, refTable, columnsStr, refColumnsStr, deleteAction, updateAction string
|
||||
|
||||
if err := rows.Scan(&schema, &tableName, &constraintName, &refSchema, &refTable, &columnsStr, &refColumnsStr, &deleteAction, &updateAction); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
columns := strings.Split(columnsStr, ",")
|
||||
refColumns := strings.Split(refColumnsStr, ",")
|
||||
|
||||
constraint := models.InitConstraint(constraintName, models.ForeignKeyConstraint)
|
||||
constraint.Schema = schema
|
||||
constraint.Table = tableName
|
||||
constraint.Columns = columns
|
||||
constraint.ReferencedSchema = refSchema
|
||||
constraint.ReferencedTable = refTable
|
||||
constraint.ReferencedColumns = refColumns
|
||||
constraint.OnDelete = strings.ToUpper(deleteAction)
|
||||
constraint.OnUpdate = strings.ToUpper(updateAction)
|
||||
|
||||
tableKey := schema + "." + tableName
|
||||
foreignKeys[tableKey] = append(foreignKeys[tableKey], constraint)
|
||||
}
|
||||
|
||||
return foreignKeys, rows.Err()
|
||||
}
|
||||
|
||||
// queryUniqueConstraints retrieves all unique constraints for a schema
|
||||
// Returns map[schema.table][]*Constraint
|
||||
func (r *Reader) queryUniqueConstraints(schemaName string) (map[string][]*models.Constraint, error) {
|
||||
query := `
|
||||
SELECT
|
||||
s.name as schema_name,
|
||||
t.name as table_name,
|
||||
i.name as constraint_name,
|
||||
STRING_AGG(c.name, ',') WITHIN GROUP (ORDER BY ic.key_ordinal) as columns
|
||||
FROM sys.tables t
|
||||
INNER JOIN sys.indexes i ON t.object_id = i.object_id AND i.is_unique = 1 AND i.is_primary_key = 0
|
||||
INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
|
||||
INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
|
||||
INNER JOIN sys.columns c ON t.object_id = c.object_id AND ic.column_id = c.column_id
|
||||
WHERE s.name = ?
|
||||
GROUP BY s.name, t.name, i.name
|
||||
`
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query, schemaName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
uniqueConstraints := make(map[string][]*models.Constraint)
|
||||
|
||||
for rows.Next() {
|
||||
var schema, tableName, constraintName, columnsStr string
|
||||
|
||||
if err := rows.Scan(&schema, &tableName, &constraintName, &columnsStr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
columns := strings.Split(columnsStr, ",")
|
||||
|
||||
constraint := models.InitConstraint(constraintName, models.UniqueConstraint)
|
||||
constraint.Schema = schema
|
||||
constraint.Table = tableName
|
||||
constraint.Columns = columns
|
||||
|
||||
tableKey := schema + "." + tableName
|
||||
uniqueConstraints[tableKey] = append(uniqueConstraints[tableKey], constraint)
|
||||
}
|
||||
|
||||
return uniqueConstraints, rows.Err()
|
||||
}
|
||||
|
||||
// queryCheckConstraints retrieves all check constraints for a schema
|
||||
// Returns map[schema.table][]*Constraint
|
||||
func (r *Reader) queryCheckConstraints(schemaName string) (map[string][]*models.Constraint, error) {
|
||||
query := `
|
||||
SELECT
|
||||
s.name as schema_name,
|
||||
t.name as table_name,
|
||||
cc.name as constraint_name,
|
||||
cc.definition
|
||||
FROM sys.tables t
|
||||
INNER JOIN sys.check_constraints cc ON t.object_id = cc.parent_object_id
|
||||
INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
|
||||
WHERE s.name = ?
|
||||
`
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query, schemaName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
checkConstraints := make(map[string][]*models.Constraint)
|
||||
|
||||
for rows.Next() {
|
||||
var schema, tableName, constraintName, definition string
|
||||
|
||||
if err := rows.Scan(&schema, &tableName, &constraintName, &definition); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
constraint := models.InitConstraint(constraintName, models.CheckConstraint)
|
||||
constraint.Schema = schema
|
||||
constraint.Table = tableName
|
||||
constraint.Expression = definition
|
||||
|
||||
tableKey := schema + "." + tableName
|
||||
checkConstraints[tableKey] = append(checkConstraints[tableKey], constraint)
|
||||
}
|
||||
|
||||
return checkConstraints, rows.Err()
|
||||
}
|
||||
|
||||
// queryIndexes retrieves all indexes for a schema
|
||||
// Returns map[schema.table][]*Index
|
||||
func (r *Reader) queryIndexes(schemaName string) (map[string][]*models.Index, error) {
|
||||
query := `
|
||||
SELECT
|
||||
s.name as schema_name,
|
||||
t.name as table_name,
|
||||
i.name as index_name,
|
||||
i.is_unique,
|
||||
STRING_AGG(c.name, ',') WITHIN GROUP (ORDER BY ic.key_ordinal) as columns
|
||||
FROM sys.tables t
|
||||
INNER JOIN sys.indexes i ON t.object_id = i.object_id AND i.is_primary_key = 0 AND i.name IS NOT NULL
|
||||
INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
|
||||
INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
|
||||
INNER JOIN sys.columns c ON t.object_id = c.object_id AND ic.column_id = c.column_id
|
||||
WHERE s.name = ?
|
||||
GROUP BY s.name, t.name, i.name, i.is_unique
|
||||
`
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query, schemaName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
indexes := make(map[string][]*models.Index)
|
||||
|
||||
for rows.Next() {
|
||||
var schema, tableName, indexName, columnsStr string
|
||||
var isUnique int
|
||||
|
||||
if err := rows.Scan(&schema, &tableName, &indexName, &isUnique, &columnsStr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
columns := strings.Split(columnsStr, ",")
|
||||
|
||||
index := models.InitIndex(indexName, tableName, schema)
|
||||
index.Columns = columns
|
||||
index.Unique = (isUnique == 1)
|
||||
index.Type = "btree" // MSSQL uses btree by default
|
||||
|
||||
tableKey := schema + "." + tableName
|
||||
indexes[tableKey] = append(indexes[tableKey], index)
|
||||
}
|
||||
|
||||
return indexes, rows.Err()
|
||||
}
|
||||
266
pkg/readers/mssql/reader.go
Normal file
266
pkg/readers/mssql/reader.go
Normal file
@@ -0,0 +1,266 @@
|
||||
package mssql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
_ "github.com/microsoft/go-mssqldb" // MSSQL driver
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/mssql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||
)
|
||||
|
||||
// Reader implements the readers.Reader interface for MSSQL databases
type Reader struct {
	options *readers.ReaderOptions // reader configuration (holds the connection string)
	db      *sql.DB                // live database handle, populated by connect()
	ctx     context.Context        // context passed to every query against the server
}
||||
|
||||
// NewReader creates a new MSSQL reader
|
||||
func NewReader(options *readers.ReaderOptions) *Reader {
|
||||
return &Reader{
|
||||
options: options,
|
||||
ctx: context.Background(),
|
||||
}
|
||||
}
|
||||
|
||||
// ReadDatabase reads the entire database schema from MSSQL
|
||||
func (r *Reader) ReadDatabase() (*models.Database, error) {
|
||||
// Validate connection string
|
||||
if r.options.ConnectionString == "" {
|
||||
return nil, fmt.Errorf("connection string is required")
|
||||
}
|
||||
|
||||
// Connect to the database
|
||||
if err := r.connect(); err != nil {
|
||||
return nil, fmt.Errorf("failed to connect: %w", err)
|
||||
}
|
||||
defer r.close()
|
||||
|
||||
// Get database name
|
||||
var dbName string
|
||||
err := r.db.QueryRowContext(r.ctx, "SELECT DB_NAME()").Scan(&dbName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get database name: %w", err)
|
||||
}
|
||||
|
||||
// Initialize database model
|
||||
db := models.InitDatabase(dbName)
|
||||
db.DatabaseType = models.MSSQLDatabaseType
|
||||
db.SourceFormat = "mssql"
|
||||
|
||||
// Get MSSQL version
|
||||
var version string
|
||||
err = r.db.QueryRowContext(r.ctx, "SELECT @@VERSION").Scan(&version)
|
||||
if err == nil {
|
||||
db.DatabaseVersion = version
|
||||
}
|
||||
|
||||
// Query all schemas
|
||||
schemas, err := r.querySchemas()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query schemas: %w", err)
|
||||
}
|
||||
|
||||
// Process each schema
|
||||
for _, schema := range schemas {
|
||||
// Query tables for this schema
|
||||
tables, err := r.queryTables(schema.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query tables for schema %s: %w", schema.Name, err)
|
||||
}
|
||||
schema.Tables = tables
|
||||
|
||||
// Query columns for tables
|
||||
columnsMap, err := r.queryColumns(schema.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query columns for schema %s: %w", schema.Name, err)
|
||||
}
|
||||
|
||||
// Populate table columns
|
||||
for _, table := range schema.Tables {
|
||||
tableKey := schema.Name + "." + table.Name
|
||||
if cols, exists := columnsMap[tableKey]; exists {
|
||||
table.Columns = cols
|
||||
}
|
||||
}
|
||||
|
||||
// Query primary keys
|
||||
primaryKeys, err := r.queryPrimaryKeys(schema.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query primary keys for schema %s: %w", schema.Name, err)
|
||||
}
|
||||
|
||||
// Apply primary keys to tables
|
||||
for _, table := range schema.Tables {
|
||||
tableKey := schema.Name + "." + table.Name
|
||||
if pk, exists := primaryKeys[tableKey]; exists {
|
||||
table.Constraints[pk.Name] = pk
|
||||
// Mark columns as primary key and not null
|
||||
for _, colName := range pk.Columns {
|
||||
if col, colExists := table.Columns[colName]; colExists {
|
||||
col.IsPrimaryKey = true
|
||||
col.NotNull = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Query foreign keys
|
||||
foreignKeys, err := r.queryForeignKeys(schema.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query foreign keys for schema %s: %w", schema.Name, err)
|
||||
}
|
||||
|
||||
// Apply foreign keys to tables
|
||||
for _, table := range schema.Tables {
|
||||
tableKey := schema.Name + "." + table.Name
|
||||
if fks, exists := foreignKeys[tableKey]; exists {
|
||||
for _, fk := range fks {
|
||||
table.Constraints[fk.Name] = fk
|
||||
// Derive relationship from foreign key
|
||||
r.deriveRelationship(table, fk)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Query unique constraints
|
||||
uniqueConstraints, err := r.queryUniqueConstraints(schema.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query unique constraints for schema %s: %w", schema.Name, err)
|
||||
}
|
||||
|
||||
// Apply unique constraints to tables
|
||||
for _, table := range schema.Tables {
|
||||
tableKey := schema.Name + "." + table.Name
|
||||
if ucs, exists := uniqueConstraints[tableKey]; exists {
|
||||
for _, uc := range ucs {
|
||||
table.Constraints[uc.Name] = uc
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Query check constraints
|
||||
checkConstraints, err := r.queryCheckConstraints(schema.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query check constraints for schema %s: %w", schema.Name, err)
|
||||
}
|
||||
|
||||
// Apply check constraints to tables
|
||||
for _, table := range schema.Tables {
|
||||
tableKey := schema.Name + "." + table.Name
|
||||
if ccs, exists := checkConstraints[tableKey]; exists {
|
||||
for _, cc := range ccs {
|
||||
table.Constraints[cc.Name] = cc
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Query indexes
|
||||
indexes, err := r.queryIndexes(schema.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query indexes for schema %s: %w", schema.Name, err)
|
||||
}
|
||||
|
||||
// Apply indexes to tables
|
||||
for _, table := range schema.Tables {
|
||||
tableKey := schema.Name + "." + table.Name
|
||||
if idxs, exists := indexes[tableKey]; exists {
|
||||
for _, idx := range idxs {
|
||||
table.Indexes[idx.Name] = idx
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Set RefDatabase for schema
|
||||
schema.RefDatabase = db
|
||||
|
||||
// Set RefSchema for tables
|
||||
for _, table := range schema.Tables {
|
||||
table.RefSchema = schema
|
||||
}
|
||||
|
||||
// Add schema to database
|
||||
db.Schemas = append(db.Schemas, schema)
|
||||
}
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// ReadSchema reads a single schema (returns the first schema from the database)
|
||||
func (r *Reader) ReadSchema() (*models.Schema, error) {
|
||||
db, err := r.ReadDatabase()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(db.Schemas) == 0 {
|
||||
return nil, fmt.Errorf("no schemas found in database")
|
||||
}
|
||||
return db.Schemas[0], nil
|
||||
}
|
||||
|
||||
// ReadTable reads a single table (returns the first table from the first schema)
|
||||
func (r *Reader) ReadTable() (*models.Table, error) {
|
||||
schema, err := r.ReadSchema()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(schema.Tables) == 0 {
|
||||
return nil, fmt.Errorf("no tables found in schema")
|
||||
}
|
||||
return schema.Tables[0], nil
|
||||
}
|
||||
|
||||
// connect establishes a connection to the MSSQL database
|
||||
func (r *Reader) connect() error {
|
||||
db, err := sql.Open("mssql", r.options.ConnectionString)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Test connection
|
||||
if err = db.PingContext(r.ctx); err != nil {
|
||||
db.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
r.db = db
|
||||
return nil
|
||||
}
|
||||
|
||||
// close closes the database connection
|
||||
func (r *Reader) close() {
|
||||
if r.db != nil {
|
||||
r.db.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// mapDataType maps MSSQL data types to canonical types
|
||||
func (r *Reader) mapDataType(mssqlType string) string {
|
||||
return mssql.ConvertMSSQLToCanonical(mssqlType)
|
||||
}
|
||||
|
||||
// deriveRelationship creates a relationship from a foreign key constraint
|
||||
func (r *Reader) deriveRelationship(table *models.Table, fk *models.Constraint) {
|
||||
relationshipName := fmt.Sprintf("%s_to_%s", table.Name, fk.ReferencedTable)
|
||||
|
||||
relationship := models.InitRelationship(relationshipName, models.OneToMany)
|
||||
relationship.FromTable = table.Name
|
||||
relationship.FromSchema = table.Schema
|
||||
relationship.ToTable = fk.ReferencedTable
|
||||
relationship.ToSchema = fk.ReferencedSchema
|
||||
relationship.ForeignKey = fk.Name
|
||||
|
||||
// Store constraint actions in properties
|
||||
if fk.OnDelete != "" {
|
||||
relationship.Properties["on_delete"] = fk.OnDelete
|
||||
}
|
||||
if fk.OnUpdate != "" {
|
||||
relationship.Properties["on_update"] = fk.OnUpdate
|
||||
}
|
||||
|
||||
table.Relationships[relationshipName] = relationship
|
||||
}
|
||||
86
pkg/readers/mssql/reader_test.go
Normal file
86
pkg/readers/mssql/reader_test.go
Normal file
@@ -0,0 +1,86 @@
|
||||
package mssql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/mssql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestMapDataType tests MSSQL type mapping to canonical types
|
||||
func TestMapDataType(t *testing.T) {
|
||||
reader := NewReader(&readers.ReaderOptions{})
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
mssqlType string
|
||||
expectedType string
|
||||
}{
|
||||
{"INT to int", "INT", "int"},
|
||||
{"BIGINT to int64", "BIGINT", "int64"},
|
||||
{"BIT to bool", "BIT", "bool"},
|
||||
{"NVARCHAR to string", "NVARCHAR(255)", "string"},
|
||||
{"DATETIME2 to timestamp", "DATETIME2", "timestamp"},
|
||||
{"DATETIMEOFFSET to timestamptz", "DATETIMEOFFSET", "timestamptz"},
|
||||
{"UNIQUEIDENTIFIER to uuid", "UNIQUEIDENTIFIER", "uuid"},
|
||||
{"FLOAT to float64", "FLOAT", "float64"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := reader.mapDataType(tt.mssqlType)
|
||||
assert.Equal(t, tt.expectedType, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestConvertCanonicalToMSSQL tests canonical to MSSQL type conversion
|
||||
func TestConvertCanonicalToMSSQL(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
canonicalType string
|
||||
expectedMSSQL string
|
||||
}{
|
||||
{"int to INT", "int", "INT"},
|
||||
{"int64 to BIGINT", "int64", "BIGINT"},
|
||||
{"bool to BIT", "bool", "BIT"},
|
||||
{"string to NVARCHAR(255)", "string", "NVARCHAR(255)"},
|
||||
{"text to NVARCHAR(MAX)", "text", "NVARCHAR(MAX)"},
|
||||
{"timestamp to DATETIME2", "timestamp", "DATETIME2"},
|
||||
{"timestamptz to DATETIMEOFFSET", "timestamptz", "DATETIMEOFFSET"},
|
||||
{"uuid to UNIQUEIDENTIFIER", "uuid", "UNIQUEIDENTIFIER"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := mssql.ConvertCanonicalToMSSQL(tt.canonicalType)
|
||||
assert.Equal(t, tt.expectedMSSQL, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestConvertMSSQLToCanonical tests MSSQL to canonical type conversion
|
||||
func TestConvertMSSQLToCanonical(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
mssqlType string
|
||||
expectedType string
|
||||
}{
|
||||
{"INT to int", "INT", "int"},
|
||||
{"BIGINT to int64", "BIGINT", "int64"},
|
||||
{"BIT to bool", "BIT", "bool"},
|
||||
{"NVARCHAR with params", "NVARCHAR(255)", "string"},
|
||||
{"DATETIME2 to timestamp", "DATETIME2", "timestamp"},
|
||||
{"DATETIMEOFFSET to timestamptz", "DATETIMEOFFSET", "timestamptz"},
|
||||
{"UNIQUEIDENTIFIER to uuid", "UNIQUEIDENTIFIER", "uuid"},
|
||||
{"VARBINARY to bytea", "VARBINARY(MAX)", "bytea"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := mssql.ConvertMSSQLToCanonical(tt.mssqlType)
|
||||
assert.Equal(t, tt.expectedType, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -206,8 +206,19 @@ func (r *Reader) queryColumns(schemaName string) (map[string]map[string]*models.
|
||||
c.numeric_precision,
|
||||
c.numeric_scale,
|
||||
c.udt_name,
|
||||
pg_catalog.format_type(a.atttypid, a.atttypmod) as formatted_data_type,
|
||||
col_description((c.table_schema||'.'||c.table_name)::regclass, c.ordinal_position) as description
|
||||
FROM information_schema.columns c
|
||||
JOIN pg_catalog.pg_namespace n
|
||||
ON n.nspname = c.table_schema
|
||||
JOIN pg_catalog.pg_class cls
|
||||
ON cls.relname = c.table_name
|
||||
AND cls.relnamespace = n.oid
|
||||
JOIN pg_catalog.pg_attribute a
|
||||
ON a.attrelid = cls.oid
|
||||
AND a.attname = c.column_name
|
||||
AND a.attnum > 0
|
||||
AND NOT a.attisdropped
|
||||
WHERE c.table_schema = $1
|
||||
ORDER BY c.table_schema, c.table_name, c.ordinal_position
|
||||
`
|
||||
@@ -221,24 +232,23 @@ func (r *Reader) queryColumns(schemaName string) (map[string]map[string]*models.
|
||||
columnsMap := make(map[string]map[string]*models.Column)
|
||||
|
||||
for rows.Next() {
|
||||
var schema, tableName, columnName, isNullable, dataType, udtName string
|
||||
var schema, tableName, columnName, isNullable, dataType, udtName, formattedDataType string
|
||||
var ordinalPosition int
|
||||
var columnDefault, description *string
|
||||
var charMaxLength, numPrecision, numScale *int
|
||||
|
||||
if err := rows.Scan(&schema, &tableName, &columnName, &ordinalPosition, &columnDefault, &isNullable, &dataType, &charMaxLength, &numPrecision, &numScale, &udtName, &description); err != nil {
|
||||
if err := rows.Scan(&schema, &tableName, &columnName, &ordinalPosition, &columnDefault, &isNullable, &dataType, &charMaxLength, &numPrecision, &numScale, &udtName, &formattedDataType, &description); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
column := models.InitColumn(columnName, tableName, schema)
|
||||
column.Type = r.mapDataType(dataType, udtName)
|
||||
column.NotNull = (isNullable == "NO")
|
||||
column.Sequence = uint(ordinalPosition)
|
||||
|
||||
// Check if this is a serial type (has nextval default)
|
||||
hasNextval := false
|
||||
if columnDefault != nil {
|
||||
// Parse default value - remove nextval for sequences
|
||||
defaultVal := *columnDefault
|
||||
if strings.HasPrefix(defaultVal, "nextval") {
|
||||
hasNextval = true
|
||||
column.AutoIncrement = true
|
||||
column.Default = defaultVal
|
||||
} else {
|
||||
@@ -246,6 +256,11 @@ func (r *Reader) queryColumns(schemaName string) (map[string]map[string]*models.
|
||||
}
|
||||
}
|
||||
|
||||
// Map data type, preserving serial types when detected
|
||||
column.Type = r.mapDataType(dataType, udtName, formattedDataType, hasNextval)
|
||||
column.NotNull = (isNullable == "NO")
|
||||
column.Sequence = uint(ordinalPosition)
|
||||
|
||||
if description != nil {
|
||||
column.Description = *description
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package pgsql
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
|
||||
@@ -258,34 +259,60 @@ func (r *Reader) close() {
|
||||
}
|
||||
}
|
||||
|
||||
// mapDataType maps PostgreSQL data types to canonical types
|
||||
func (r *Reader) mapDataType(pgType, udtName string) string {
|
||||
// mapDataType maps PostgreSQL data types while preserving exact type text when available.
|
||||
func (r *Reader) mapDataType(pgType, udtName, formattedType string, hasNextval bool) string {
|
||||
normalizedPGType := strings.ToLower(strings.TrimSpace(pgType))
|
||||
|
||||
// If the column has a nextval default, it's likely a serial type
|
||||
// Map to the appropriate serial type instead of the base integer type
|
||||
if hasNextval {
|
||||
switch normalizedPGType {
|
||||
case "integer", "int", "int4":
|
||||
return "serial"
|
||||
case "bigint", "int8":
|
||||
return "bigserial"
|
||||
case "smallint", "int2":
|
||||
return "smallserial"
|
||||
}
|
||||
}
|
||||
|
||||
// Prefer the database-provided formatted type; this preserves arrays/custom
|
||||
// types/modifiers like text[], vector(1536), numeric(10,2), etc.
|
||||
if strings.TrimSpace(formattedType) != "" {
|
||||
return formattedType
|
||||
}
|
||||
|
||||
// information_schema reports arrays generically as "ARRAY" with udt_name like "_text".
|
||||
if strings.EqualFold(pgType, "ARRAY") && strings.HasPrefix(udtName, "_") && len(udtName) > 1 {
|
||||
return udtName[1:] + "[]"
|
||||
}
|
||||
|
||||
// Map common PostgreSQL types
|
||||
typeMap := map[string]string{
|
||||
"integer": "int",
|
||||
"bigint": "int64",
|
||||
"smallint": "int16",
|
||||
"int": "int",
|
||||
"int2": "int16",
|
||||
"int4": "int",
|
||||
"int8": "int64",
|
||||
"serial": "int",
|
||||
"bigserial": "int64",
|
||||
"smallserial": "int16",
|
||||
"numeric": "decimal",
|
||||
"integer": "integer",
|
||||
"bigint": "bigint",
|
||||
"smallint": "smallint",
|
||||
"int": "integer",
|
||||
"int2": "smallint",
|
||||
"int4": "integer",
|
||||
"int8": "bigint",
|
||||
"serial": "serial",
|
||||
"bigserial": "bigserial",
|
||||
"smallserial": "smallserial",
|
||||
"numeric": "numeric",
|
||||
"decimal": "decimal",
|
||||
"real": "float32",
|
||||
"double precision": "float64",
|
||||
"float4": "float32",
|
||||
"float8": "float64",
|
||||
"money": "decimal",
|
||||
"character varying": "string",
|
||||
"varchar": "string",
|
||||
"character": "string",
|
||||
"char": "string",
|
||||
"text": "string",
|
||||
"boolean": "bool",
|
||||
"bool": "bool",
|
||||
"real": "real",
|
||||
"double precision": "double precision",
|
||||
"float4": "real",
|
||||
"float8": "double precision",
|
||||
"money": "money",
|
||||
"character varying": "varchar",
|
||||
"varchar": "varchar",
|
||||
"character": "char",
|
||||
"char": "char",
|
||||
"text": "text",
|
||||
"boolean": "boolean",
|
||||
"bool": "boolean",
|
||||
"date": "date",
|
||||
"time": "time",
|
||||
"time without time zone": "time",
|
||||
@@ -306,7 +333,7 @@ func (r *Reader) mapDataType(pgType, udtName string) string {
|
||||
}
|
||||
|
||||
// Try mapped type first
|
||||
if mapped, exists := typeMap[pgType]; exists {
|
||||
if mapped, exists := typeMap[normalizedPGType]; exists {
|
||||
return mapped
|
||||
}
|
||||
|
||||
@@ -315,8 +342,11 @@ func (r *Reader) mapDataType(pgType, udtName string) string {
|
||||
return pgsql.GetSQLType(pgType)
|
||||
}
|
||||
|
||||
// Return UDT name for custom types
|
||||
// Return UDT name for custom types (including array fallback when needed)
|
||||
if udtName != "" {
|
||||
if strings.HasPrefix(udtName, "_") && len(udtName) > 1 {
|
||||
return udtName[1:] + "[]"
|
||||
}
|
||||
return udtName
|
||||
}
|
||||
|
||||
|
||||
@@ -173,35 +173,58 @@ func TestMapDataType(t *testing.T) {
|
||||
reader := &Reader{}
|
||||
|
||||
tests := []struct {
|
||||
pgType string
|
||||
udtName string
|
||||
expected string
|
||||
pgType string
|
||||
udtName string
|
||||
formattedType string
|
||||
expected string
|
||||
}{
|
||||
{"integer", "int4", "int"},
|
||||
{"bigint", "int8", "int64"},
|
||||
{"smallint", "int2", "int16"},
|
||||
{"character varying", "varchar", "string"},
|
||||
{"text", "text", "string"},
|
||||
{"boolean", "bool", "bool"},
|
||||
{"timestamp without time zone", "timestamp", "timestamp"},
|
||||
{"timestamp with time zone", "timestamptz", "timestamptz"},
|
||||
{"json", "json", "json"},
|
||||
{"jsonb", "jsonb", "jsonb"},
|
||||
{"uuid", "uuid", "uuid"},
|
||||
{"numeric", "numeric", "decimal"},
|
||||
{"real", "float4", "float32"},
|
||||
{"double precision", "float8", "float64"},
|
||||
{"date", "date", "date"},
|
||||
{"time without time zone", "time", "time"},
|
||||
{"bytea", "bytea", "bytea"},
|
||||
{"unknown_type", "custom", "custom"}, // Should return UDT name
|
||||
{"integer", "int4", "", "integer"},
|
||||
{"bigint", "int8", "", "bigint"},
|
||||
{"smallint", "int2", "", "smallint"},
|
||||
{"character varying", "varchar", "", "varchar"},
|
||||
{"text", "text", "", "text"},
|
||||
{"boolean", "bool", "", "boolean"},
|
||||
{"timestamp without time zone", "timestamp", "", "timestamp"},
|
||||
{"timestamp with time zone", "timestamptz", "", "timestamptz"},
|
||||
{"json", "json", "", "json"},
|
||||
{"jsonb", "jsonb", "", "jsonb"},
|
||||
{"uuid", "uuid", "", "uuid"},
|
||||
{"numeric", "numeric", "", "numeric"},
|
||||
{"real", "float4", "", "real"},
|
||||
{"double precision", "float8", "", "double precision"},
|
||||
{"date", "date", "", "date"},
|
||||
{"time without time zone", "time", "", "time"},
|
||||
{"bytea", "bytea", "", "bytea"},
|
||||
{"unknown_type", "custom", "", "custom"}, // Should return UDT name
|
||||
{"ARRAY", "_text", "", "text[]"},
|
||||
{"USER-DEFINED", "vector", "vector(1536)", "vector(1536)"},
|
||||
{"character varying", "varchar", "character varying(255)", "character varying(255)"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.pgType, func(t *testing.T) {
|
||||
result := reader.mapDataType(tt.pgType, tt.udtName)
|
||||
result := reader.mapDataType(tt.pgType, tt.udtName, tt.formattedType, false)
|
||||
if result != tt.expected {
|
||||
t.Errorf("mapDataType(%s, %s) = %s, expected %s", tt.pgType, tt.udtName, result, tt.expected)
|
||||
t.Errorf("mapDataType(%s, %s, %s) = %s, expected %s", tt.pgType, tt.udtName, tt.formattedType, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Test serial type detection with hasNextval=true
|
||||
serialTests := []struct {
|
||||
pgType string
|
||||
expected string
|
||||
}{
|
||||
{"integer", "serial"},
|
||||
{"bigint", "bigserial"},
|
||||
{"smallint", "smallserial"},
|
||||
}
|
||||
|
||||
for _, tt := range serialTests {
|
||||
t.Run(tt.pgType+"_with_nextval", func(t *testing.T) {
|
||||
result := reader.mapDataType(tt.pgType, "", "", true)
|
||||
if result != tt.expected {
|
||||
t.Errorf("mapDataType(%s, '', '', true) = %s, expected %s", tt.pgType, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -211,63 +234,63 @@ func TestParseIndexDefinition(t *testing.T) {
|
||||
reader := &Reader{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
indexName string
|
||||
tableName string
|
||||
schema string
|
||||
indexDef string
|
||||
wantType string
|
||||
wantUnique bool
|
||||
name string
|
||||
indexName string
|
||||
tableName string
|
||||
schema string
|
||||
indexDef string
|
||||
wantType string
|
||||
wantUnique bool
|
||||
wantColumns int
|
||||
}{
|
||||
{
|
||||
name: "simple btree index",
|
||||
indexName: "idx_users_email",
|
||||
tableName: "users",
|
||||
schema: "public",
|
||||
indexDef: "CREATE INDEX idx_users_email ON public.users USING btree (email)",
|
||||
wantType: "btree",
|
||||
wantUnique: false,
|
||||
name: "simple btree index",
|
||||
indexName: "idx_users_email",
|
||||
tableName: "users",
|
||||
schema: "public",
|
||||
indexDef: "CREATE INDEX idx_users_email ON public.users USING btree (email)",
|
||||
wantType: "btree",
|
||||
wantUnique: false,
|
||||
wantColumns: 1,
|
||||
},
|
||||
{
|
||||
name: "unique index",
|
||||
indexName: "idx_users_username",
|
||||
tableName: "users",
|
||||
schema: "public",
|
||||
indexDef: "CREATE UNIQUE INDEX idx_users_username ON public.users USING btree (username)",
|
||||
wantType: "btree",
|
||||
wantUnique: true,
|
||||
name: "unique index",
|
||||
indexName: "idx_users_username",
|
||||
tableName: "users",
|
||||
schema: "public",
|
||||
indexDef: "CREATE UNIQUE INDEX idx_users_username ON public.users USING btree (username)",
|
||||
wantType: "btree",
|
||||
wantUnique: true,
|
||||
wantColumns: 1,
|
||||
},
|
||||
{
|
||||
name: "composite index",
|
||||
indexName: "idx_users_name",
|
||||
tableName: "users",
|
||||
schema: "public",
|
||||
indexDef: "CREATE INDEX idx_users_name ON public.users USING btree (first_name, last_name)",
|
||||
wantType: "btree",
|
||||
wantUnique: false,
|
||||
name: "composite index",
|
||||
indexName: "idx_users_name",
|
||||
tableName: "users",
|
||||
schema: "public",
|
||||
indexDef: "CREATE INDEX idx_users_name ON public.users USING btree (first_name, last_name)",
|
||||
wantType: "btree",
|
||||
wantUnique: false,
|
||||
wantColumns: 2,
|
||||
},
|
||||
{
|
||||
name: "gin index",
|
||||
indexName: "idx_posts_tags",
|
||||
tableName: "posts",
|
||||
schema: "public",
|
||||
indexDef: "CREATE INDEX idx_posts_tags ON public.posts USING gin (tags)",
|
||||
wantType: "gin",
|
||||
wantUnique: false,
|
||||
name: "gin index",
|
||||
indexName: "idx_posts_tags",
|
||||
tableName: "posts",
|
||||
schema: "public",
|
||||
indexDef: "CREATE INDEX idx_posts_tags ON public.posts USING gin (tags)",
|
||||
wantType: "gin",
|
||||
wantUnique: false,
|
||||
wantColumns: 1,
|
||||
},
|
||||
{
|
||||
name: "partial index with where clause",
|
||||
indexName: "idx_users_active",
|
||||
tableName: "users",
|
||||
schema: "public",
|
||||
indexDef: "CREATE INDEX idx_users_active ON public.users USING btree (id) WHERE (active = true)",
|
||||
wantType: "btree",
|
||||
wantUnique: false,
|
||||
name: "partial index with where clause",
|
||||
indexName: "idx_users_active",
|
||||
tableName: "users",
|
||||
schema: "public",
|
||||
indexDef: "CREATE INDEX idx_users_active ON public.users USING btree (id) WHERE (active = true)",
|
||||
wantType: "btree",
|
||||
wantUnique: false,
|
||||
wantColumns: 1,
|
||||
},
|
||||
}
|
||||
|
||||
75
pkg/readers/sqlite/README.md
Normal file
75
pkg/readers/sqlite/README.md
Normal file
@@ -0,0 +1,75 @@
|
||||
# SQLite Reader
|
||||
|
||||
Reads database schema from SQLite database files.
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
import (
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers/sqlite"
|
||||
)
|
||||
|
||||
// Using file path
|
||||
options := &readers.ReaderOptions{
|
||||
FilePath: "path/to/database.db",
|
||||
}
|
||||
|
||||
reader := sqlite.NewReader(options)
|
||||
db, err := reader.ReadDatabase()
|
||||
|
||||
// Or using connection string
|
||||
options := &readers.ReaderOptions{
|
||||
ConnectionString: "path/to/database.db",
|
||||
}
|
||||
```
|
||||
|
||||
## Features
|
||||
|
||||
- Reads tables with columns and data types
|
||||
- Reads views with definitions
|
||||
- Reads primary keys
|
||||
- Reads foreign keys with CASCADE actions
|
||||
- Reads indexes (non-auto-generated)
|
||||
- Maps SQLite types to canonical types
|
||||
- Derives relationships from foreign keys
|
||||
|
||||
## SQLite Specifics
|
||||
|
||||
- SQLite doesn't support schemas, creates single "main" schema
|
||||
- Uses pure Go driver (modernc.org/sqlite) - no CGo required
|
||||
- Supports both file path and connection string
|
||||
- Auto-increment detection for INTEGER PRIMARY KEY columns
|
||||
- Foreign keys require `PRAGMA foreign_keys = ON` to be set
|
||||
|
||||
## Example Schema
|
||||
|
||||
```sql
|
||||
PRAGMA foreign_keys = ON;
|
||||
|
||||
CREATE TABLE users (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
username VARCHAR(50) NOT NULL UNIQUE,
|
||||
email VARCHAR(100) NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE posts (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
user_id INTEGER NOT NULL,
|
||||
title VARCHAR(200) NOT NULL,
|
||||
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
|
||||
);
|
||||
```
|
||||
|
||||
## Type Mappings
|
||||
|
||||
| SQLite Type | Canonical Type |
|
||||
|-------------|---------------|
|
||||
| INTEGER, INT | int |
|
||||
| BIGINT | int64 |
|
||||
| REAL, DOUBLE | float64 |
|
||||
| TEXT, VARCHAR | string |
|
||||
| BLOB | bytea |
|
||||
| BOOLEAN | bool |
|
||||
| DATE | date |
|
||||
| DATETIME, TIMESTAMP | timestamp |
|
||||
306
pkg/readers/sqlite/queries.go
Normal file
306
pkg/readers/sqlite/queries.go
Normal file
@@ -0,0 +1,306 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
)
|
||||
|
||||
// queryTables retrieves all tables from the SQLite database
|
||||
func (r *Reader) queryTables() ([]*models.Table, error) {
|
||||
query := `
|
||||
SELECT name
|
||||
FROM sqlite_master
|
||||
WHERE type = 'table'
|
||||
AND name NOT LIKE 'sqlite_%'
|
||||
ORDER BY name
|
||||
`
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
tables := make([]*models.Table, 0)
|
||||
for rows.Next() {
|
||||
var tableName string
|
||||
|
||||
if err := rows.Scan(&tableName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
table := models.InitTable(tableName, "main")
|
||||
tables = append(tables, table)
|
||||
}
|
||||
|
||||
return tables, rows.Err()
|
||||
}
|
||||
|
||||
// queryViews retrieves all views from the SQLite database
|
||||
func (r *Reader) queryViews() ([]*models.View, error) {
|
||||
query := `
|
||||
SELECT name, sql
|
||||
FROM sqlite_master
|
||||
WHERE type = 'view'
|
||||
ORDER BY name
|
||||
`
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
views := make([]*models.View, 0)
|
||||
for rows.Next() {
|
||||
var viewName string
|
||||
var sql *string
|
||||
|
||||
if err := rows.Scan(&viewName, &sql); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
view := models.InitView(viewName, "main")
|
||||
if sql != nil {
|
||||
view.Definition = *sql
|
||||
}
|
||||
|
||||
views = append(views, view)
|
||||
}
|
||||
|
||||
return views, rows.Err()
|
||||
}
|
||||
|
||||
// queryColumns retrieves all columns for a given table or view
|
||||
func (r *Reader) queryColumns(tableName string) (map[string]*models.Column, error) {
|
||||
query := fmt.Sprintf("PRAGMA table_info(%s)", tableName)
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
columns := make(map[string]*models.Column)
|
||||
|
||||
for rows.Next() {
|
||||
var cid int
|
||||
var name, dataType string
|
||||
var notNull, pk int
|
||||
var defaultValue *string
|
||||
|
||||
if err := rows.Scan(&cid, &name, &dataType, ¬Null, &defaultValue, &pk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
column := models.InitColumn(name, tableName, "main")
|
||||
column.Type = r.mapDataType(strings.ToUpper(dataType))
|
||||
column.NotNull = (notNull == 1)
|
||||
column.IsPrimaryKey = (pk > 0)
|
||||
column.Sequence = uint(cid + 1)
|
||||
|
||||
if defaultValue != nil {
|
||||
column.Default = *defaultValue
|
||||
}
|
||||
|
||||
// Check for autoincrement (SQLite uses INTEGER PRIMARY KEY AUTOINCREMENT)
|
||||
if pk > 0 && strings.EqualFold(dataType, "INTEGER") {
|
||||
column.AutoIncrement = r.isAutoIncrement(tableName, name)
|
||||
}
|
||||
|
||||
columns[name] = column
|
||||
}
|
||||
|
||||
return columns, rows.Err()
|
||||
}
|
||||
|
||||
// isAutoIncrement checks if a column is autoincrement
|
||||
func (r *Reader) isAutoIncrement(tableName, columnName string) bool {
|
||||
// Check sqlite_sequence table or parse CREATE TABLE statement
|
||||
query := `
|
||||
SELECT sql
|
||||
FROM sqlite_master
|
||||
WHERE type = 'table' AND name = ?
|
||||
`
|
||||
|
||||
var sql string
|
||||
err := r.db.QueryRowContext(r.ctx, query, tableName).Scan(&sql)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if the SQL contains AUTOINCREMENT for this column
|
||||
return strings.Contains(strings.ToUpper(sql), strings.ToUpper(columnName)+" INTEGER PRIMARY KEY AUTOINCREMENT") ||
|
||||
strings.Contains(strings.ToUpper(sql), strings.ToUpper(columnName)+" INTEGER AUTOINCREMENT")
|
||||
}
|
||||
|
||||
// queryPrimaryKey retrieves the primary key constraint for a table
|
||||
func (r *Reader) queryPrimaryKey(tableName string) (*models.Constraint, error) {
|
||||
query := fmt.Sprintf("PRAGMA table_info(%s)", tableName)
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var pkColumns []string
|
||||
|
||||
for rows.Next() {
|
||||
var cid int
|
||||
var name, dataType string
|
||||
var notNull, pk int
|
||||
var defaultValue *string
|
||||
|
||||
if err := rows.Scan(&cid, &name, &dataType, ¬Null, &defaultValue, &pk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if pk > 0 {
|
||||
pkColumns = append(pkColumns, name)
|
||||
}
|
||||
}
|
||||
|
||||
if len(pkColumns) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Create primary key constraint
|
||||
constraintName := fmt.Sprintf("%s_pkey", tableName)
|
||||
constraint := models.InitConstraint(constraintName, models.PrimaryKeyConstraint)
|
||||
constraint.Schema = "main"
|
||||
constraint.Table = tableName
|
||||
constraint.Columns = pkColumns
|
||||
|
||||
return constraint, rows.Err()
|
||||
}
|
||||
|
||||
// queryForeignKeys retrieves all foreign key constraints for a table
|
||||
func (r *Reader) queryForeignKeys(tableName string) ([]*models.Constraint, error) {
|
||||
query := fmt.Sprintf("PRAGMA foreign_key_list(%s)", tableName)
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
// Group foreign keys by id (since composite FKs have multiple rows)
|
||||
fkMap := make(map[int]*models.Constraint)
|
||||
|
||||
for rows.Next() {
|
||||
var id, seq int
|
||||
var referencedTable, fromColumn, toColumn string
|
||||
var onUpdate, onDelete, match string
|
||||
|
||||
if err := rows.Scan(&id, &seq, &referencedTable, &fromColumn, &toColumn, &onUpdate, &onDelete, &match); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, exists := fkMap[id]; !exists {
|
||||
constraintName := fmt.Sprintf("%s_%s_fkey", tableName, referencedTable)
|
||||
if id > 0 {
|
||||
constraintName = fmt.Sprintf("%s_%s_fkey_%d", tableName, referencedTable, id)
|
||||
}
|
||||
|
||||
constraint := models.InitConstraint(constraintName, models.ForeignKeyConstraint)
|
||||
constraint.Schema = "main"
|
||||
constraint.Table = tableName
|
||||
constraint.ReferencedSchema = "main"
|
||||
constraint.ReferencedTable = referencedTable
|
||||
constraint.OnUpdate = onUpdate
|
||||
constraint.OnDelete = onDelete
|
||||
constraint.Columns = []string{}
|
||||
constraint.ReferencedColumns = []string{}
|
||||
|
||||
fkMap[id] = constraint
|
||||
}
|
||||
|
||||
// Add column to the constraint
|
||||
fkMap[id].Columns = append(fkMap[id].Columns, fromColumn)
|
||||
fkMap[id].ReferencedColumns = append(fkMap[id].ReferencedColumns, toColumn)
|
||||
}
|
||||
|
||||
// Convert map to slice
|
||||
foreignKeys := make([]*models.Constraint, 0, len(fkMap))
|
||||
for _, fk := range fkMap {
|
||||
foreignKeys = append(foreignKeys, fk)
|
||||
}
|
||||
|
||||
return foreignKeys, rows.Err()
|
||||
}
|
||||
|
||||
// queryIndexes retrieves all indexes for a table
|
||||
func (r *Reader) queryIndexes(tableName string) ([]*models.Index, error) {
|
||||
query := fmt.Sprintf("PRAGMA index_list(%s)", tableName)
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
indexes := make([]*models.Index, 0)
|
||||
|
||||
for rows.Next() {
|
||||
var seq int
|
||||
var name string
|
||||
var unique int
|
||||
var origin string
|
||||
var partial int
|
||||
|
||||
if err := rows.Scan(&seq, &name, &unique, &origin, &partial); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Skip auto-generated indexes (origin = 'pk' for primary keys, etc.)
|
||||
// origin: c = CREATE INDEX, u = UNIQUE constraint, pk = PRIMARY KEY
|
||||
if origin == "pk" || origin == "u" {
|
||||
continue
|
||||
}
|
||||
|
||||
index := models.InitIndex(name, tableName, "main")
|
||||
index.Unique = (unique == 1)
|
||||
|
||||
// Get index columns
|
||||
columns, err := r.queryIndexColumns(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
index.Columns = columns
|
||||
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
|
||||
return indexes, rows.Err()
|
||||
}
|
||||
|
||||
// queryIndexColumns retrieves the columns for a specific index
|
||||
func (r *Reader) queryIndexColumns(indexName string) ([]string, error) {
|
||||
query := fmt.Sprintf("PRAGMA index_info(%s)", indexName)
|
||||
|
||||
rows, err := r.db.QueryContext(r.ctx, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
columns := make([]string, 0)
|
||||
|
||||
for rows.Next() {
|
||||
var seqno, cid int
|
||||
var name *string
|
||||
|
||||
if err := rows.Scan(&seqno, &cid, &name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if name != nil {
|
||||
columns = append(columns, *name)
|
||||
}
|
||||
}
|
||||
|
||||
return columns, rows.Err()
|
||||
}
|
||||
261
pkg/readers/sqlite/reader.go
Normal file
261
pkg/readers/sqlite/reader.go
Normal file
@@ -0,0 +1,261 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
_ "modernc.org/sqlite" // SQLite driver
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||
)
|
||||
|
||||
// Reader implements the readers.Reader interface for SQLite databases
|
||||
type Reader struct {
|
||||
options *readers.ReaderOptions
|
||||
db *sql.DB
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// NewReader creates a new SQLite reader
|
||||
func NewReader(options *readers.ReaderOptions) *Reader {
|
||||
return &Reader{
|
||||
options: options,
|
||||
ctx: context.Background(),
|
||||
}
|
||||
}
|
||||
|
||||
// ReadDatabase reads the entire database schema from SQLite
|
||||
func (r *Reader) ReadDatabase() (*models.Database, error) {
|
||||
// Validate file path or connection string
|
||||
dbPath := r.options.FilePath
|
||||
if dbPath == "" && r.options.ConnectionString != "" {
|
||||
dbPath = r.options.ConnectionString
|
||||
}
|
||||
if dbPath == "" {
|
||||
return nil, fmt.Errorf("file path or connection string is required")
|
||||
}
|
||||
|
||||
// Connect to the database
|
||||
if err := r.connect(dbPath); err != nil {
|
||||
return nil, fmt.Errorf("failed to connect: %w", err)
|
||||
}
|
||||
defer r.close()
|
||||
|
||||
// Get database name from file path
|
||||
dbName := filepath.Base(dbPath)
|
||||
if dbName == "" {
|
||||
dbName = "sqlite"
|
||||
}
|
||||
|
||||
// Initialize database model
|
||||
db := models.InitDatabase(dbName)
|
||||
db.DatabaseType = models.SqlLiteDatabaseType
|
||||
db.SourceFormat = "sqlite"
|
||||
|
||||
// Get SQLite version
|
||||
var version string
|
||||
err := r.db.QueryRowContext(r.ctx, "SELECT sqlite_version()").Scan(&version)
|
||||
if err == nil {
|
||||
db.DatabaseVersion = version
|
||||
}
|
||||
|
||||
// SQLite doesn't have schemas, so we create a single "main" schema
|
||||
schema := models.InitSchema("main")
|
||||
schema.RefDatabase = db
|
||||
|
||||
// Query tables
|
||||
tables, err := r.queryTables()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query tables: %w", err)
|
||||
}
|
||||
schema.Tables = tables
|
||||
|
||||
// Query views
|
||||
views, err := r.queryViews()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query views: %w", err)
|
||||
}
|
||||
schema.Views = views
|
||||
|
||||
// Query columns for tables and views
|
||||
for _, table := range schema.Tables {
|
||||
columns, err := r.queryColumns(table.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query columns for table %s: %w", table.Name, err)
|
||||
}
|
||||
table.Columns = columns
|
||||
table.RefSchema = schema
|
||||
|
||||
// Query primary key
|
||||
pk, err := r.queryPrimaryKey(table.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query primary key for table %s: %w", table.Name, err)
|
||||
}
|
||||
if pk != nil {
|
||||
table.Constraints[pk.Name] = pk
|
||||
// Mark columns as primary key and not null
|
||||
for _, colName := range pk.Columns {
|
||||
if col, exists := table.Columns[colName]; exists {
|
||||
col.IsPrimaryKey = true
|
||||
col.NotNull = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Query foreign keys
|
||||
foreignKeys, err := r.queryForeignKeys(table.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query foreign keys for table %s: %w", table.Name, err)
|
||||
}
|
||||
for _, fk := range foreignKeys {
|
||||
table.Constraints[fk.Name] = fk
|
||||
// Derive relationship from foreign key
|
||||
r.deriveRelationship(table, fk)
|
||||
}
|
||||
|
||||
// Query indexes
|
||||
indexes, err := r.queryIndexes(table.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query indexes for table %s: %w", table.Name, err)
|
||||
}
|
||||
for _, idx := range indexes {
|
||||
table.Indexes[idx.Name] = idx
|
||||
}
|
||||
}
|
||||
|
||||
// Query columns for views
|
||||
for _, view := range schema.Views {
|
||||
columns, err := r.queryColumns(view.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query columns for view %s: %w", view.Name, err)
|
||||
}
|
||||
view.Columns = columns
|
||||
view.RefSchema = schema
|
||||
}
|
||||
|
||||
// Add schema to database
|
||||
db.Schemas = append(db.Schemas, schema)
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// ReadSchema reads a single schema (returns the main schema from the database)
|
||||
func (r *Reader) ReadSchema() (*models.Schema, error) {
|
||||
db, err := r.ReadDatabase()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(db.Schemas) == 0 {
|
||||
return nil, fmt.Errorf("no schemas found in database")
|
||||
}
|
||||
return db.Schemas[0], nil
|
||||
}
|
||||
|
||||
// ReadTable reads a single table (returns the first table from the schema)
|
||||
func (r *Reader) ReadTable() (*models.Table, error) {
|
||||
schema, err := r.ReadSchema()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(schema.Tables) == 0 {
|
||||
return nil, fmt.Errorf("no tables found in schema")
|
||||
}
|
||||
return schema.Tables[0], nil
|
||||
}
|
||||
|
||||
// connect establishes a connection to the SQLite database
|
||||
func (r *Reader) connect(dbPath string) error {
|
||||
db, err := sql.Open("sqlite", dbPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.db = db
|
||||
return nil
|
||||
}
|
||||
|
||||
// close closes the database connection
|
||||
func (r *Reader) close() {
|
||||
if r.db != nil {
|
||||
r.db.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// mapDataType maps SQLite data types to canonical types
|
||||
func (r *Reader) mapDataType(sqliteType string) string {
|
||||
// SQLite has a flexible type system, but we map common types
|
||||
typeMap := map[string]string{
|
||||
"INTEGER": "int",
|
||||
"INT": "int",
|
||||
"TINYINT": "int8",
|
||||
"SMALLINT": "int16",
|
||||
"MEDIUMINT": "int",
|
||||
"BIGINT": "int64",
|
||||
"UNSIGNED BIG INT": "uint64",
|
||||
"INT2": "int16",
|
||||
"INT8": "int64",
|
||||
"REAL": "float64",
|
||||
"DOUBLE": "float64",
|
||||
"DOUBLE PRECISION": "float64",
|
||||
"FLOAT": "float32",
|
||||
"NUMERIC": "decimal",
|
||||
"DECIMAL": "decimal",
|
||||
"BOOLEAN": "bool",
|
||||
"BOOL": "bool",
|
||||
"DATE": "date",
|
||||
"DATETIME": "timestamp",
|
||||
"TIMESTAMP": "timestamp",
|
||||
"TEXT": "string",
|
||||
"VARCHAR": "string",
|
||||
"CHAR": "string",
|
||||
"CHARACTER": "string",
|
||||
"VARYING CHARACTER": "string",
|
||||
"NCHAR": "string",
|
||||
"NVARCHAR": "string",
|
||||
"CLOB": "text",
|
||||
"BLOB": "bytea",
|
||||
}
|
||||
|
||||
// Try exact match first
|
||||
if mapped, exists := typeMap[sqliteType]; exists {
|
||||
return mapped
|
||||
}
|
||||
|
||||
// Try case-insensitive match for common types
|
||||
sqliteTypeUpper := sqliteType
|
||||
if len(sqliteType) > 0 {
|
||||
// Extract base type (e.g., "VARCHAR(255)" -> "VARCHAR")
|
||||
for baseType := range typeMap {
|
||||
if len(sqliteTypeUpper) >= len(baseType) && sqliteTypeUpper[:len(baseType)] == baseType {
|
||||
return typeMap[baseType]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Default to string for unknown types
|
||||
return "string"
|
||||
}
|
||||
|
||||
// deriveRelationship creates a relationship from a foreign key constraint
|
||||
func (r *Reader) deriveRelationship(table *models.Table, fk *models.Constraint) {
|
||||
relationshipName := fmt.Sprintf("%s_to_%s", table.Name, fk.ReferencedTable)
|
||||
|
||||
relationship := models.InitRelationship(relationshipName, models.OneToMany)
|
||||
relationship.FromTable = table.Name
|
||||
relationship.FromSchema = table.Schema
|
||||
relationship.ToTable = fk.ReferencedTable
|
||||
relationship.ToSchema = fk.ReferencedSchema
|
||||
relationship.ForeignKey = fk.Name
|
||||
|
||||
// Store constraint actions in properties
|
||||
if fk.OnDelete != "" {
|
||||
relationship.Properties["on_delete"] = fk.OnDelete
|
||||
}
|
||||
if fk.OnUpdate != "" {
|
||||
relationship.Properties["on_update"] = fk.OnUpdate
|
||||
}
|
||||
|
||||
table.Relationships[relationshipName] = relationship
|
||||
}
|
||||
334
pkg/readers/sqlite/reader_test.go
Normal file
334
pkg/readers/sqlite/reader_test.go
Normal file
@@ -0,0 +1,334 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||
)
|
||||
|
||||
// setupTestDatabase creates a temporary SQLite database with test data
|
||||
func setupTestDatabase(t *testing.T) string {
|
||||
tmpDir := t.TempDir()
|
||||
dbPath := filepath.Join(tmpDir, "test.db")
|
||||
|
||||
db, err := sql.Open("sqlite", dbPath)
|
||||
require.NoError(t, err)
|
||||
defer db.Close()
|
||||
|
||||
// Create test schema
|
||||
schema := `
|
||||
PRAGMA foreign_keys = ON;
|
||||
|
||||
CREATE TABLE users (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
username VARCHAR(50) NOT NULL UNIQUE,
|
||||
email VARCHAR(100) NOT NULL,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE TABLE posts (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
user_id INTEGER NOT NULL,
|
||||
title VARCHAR(200) NOT NULL,
|
||||
content TEXT,
|
||||
published BOOLEAN DEFAULT 0,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
CREATE TABLE comments (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
post_id INTEGER NOT NULL,
|
||||
user_id INTEGER NOT NULL,
|
||||
comment TEXT NOT NULL,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
FOREIGN KEY (post_id) REFERENCES posts(id) ON DELETE CASCADE,
|
||||
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
CREATE INDEX idx_posts_user_id ON posts(user_id);
|
||||
CREATE INDEX idx_comments_post_id ON comments(post_id);
|
||||
CREATE UNIQUE INDEX idx_users_email ON users(email);
|
||||
|
||||
CREATE VIEW user_post_count AS
|
||||
SELECT u.id, u.username, COUNT(p.id) as post_count
|
||||
FROM users u
|
||||
LEFT JOIN posts p ON u.id = p.user_id
|
||||
GROUP BY u.id, u.username;
|
||||
`
|
||||
|
||||
_, err = db.Exec(schema)
|
||||
require.NoError(t, err)
|
||||
|
||||
return dbPath
|
||||
}
|
||||
|
||||
func TestReader_ReadDatabase(t *testing.T) {
|
||||
dbPath := setupTestDatabase(t)
|
||||
defer os.Remove(dbPath)
|
||||
|
||||
options := &readers.ReaderOptions{
|
||||
FilePath: dbPath,
|
||||
}
|
||||
|
||||
reader := NewReader(options)
|
||||
db, err := reader.ReadDatabase()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, db)
|
||||
|
||||
// Check database metadata
|
||||
assert.Equal(t, "test.db", db.Name)
|
||||
assert.Equal(t, models.SqlLiteDatabaseType, db.DatabaseType)
|
||||
assert.Equal(t, "sqlite", db.SourceFormat)
|
||||
assert.NotEmpty(t, db.DatabaseVersion)
|
||||
|
||||
// Check schemas (SQLite should have a single "main" schema)
|
||||
require.Len(t, db.Schemas, 1)
|
||||
schema := db.Schemas[0]
|
||||
assert.Equal(t, "main", schema.Name)
|
||||
|
||||
// Check tables
|
||||
assert.Len(t, schema.Tables, 3)
|
||||
tableNames := make([]string, len(schema.Tables))
|
||||
for i, table := range schema.Tables {
|
||||
tableNames[i] = table.Name
|
||||
}
|
||||
assert.Contains(t, tableNames, "users")
|
||||
assert.Contains(t, tableNames, "posts")
|
||||
assert.Contains(t, tableNames, "comments")
|
||||
|
||||
// Check views
|
||||
assert.Len(t, schema.Views, 1)
|
||||
assert.Equal(t, "user_post_count", schema.Views[0].Name)
|
||||
assert.NotEmpty(t, schema.Views[0].Definition)
|
||||
}
|
||||
|
||||
func TestReader_ReadTable_Users(t *testing.T) {
|
||||
dbPath := setupTestDatabase(t)
|
||||
defer os.Remove(dbPath)
|
||||
|
||||
options := &readers.ReaderOptions{
|
||||
FilePath: dbPath,
|
||||
}
|
||||
|
||||
reader := NewReader(options)
|
||||
db, err := reader.ReadDatabase()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, db)
|
||||
|
||||
// Find users table
|
||||
var usersTable *models.Table
|
||||
for _, table := range db.Schemas[0].Tables {
|
||||
if table.Name == "users" {
|
||||
usersTable = table
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
require.NotNil(t, usersTable)
|
||||
assert.Equal(t, "users", usersTable.Name)
|
||||
assert.Equal(t, "main", usersTable.Schema)
|
||||
|
||||
// Check columns
|
||||
assert.Len(t, usersTable.Columns, 4)
|
||||
|
||||
// Check id column
|
||||
idCol, exists := usersTable.Columns["id"]
|
||||
require.True(t, exists)
|
||||
assert.Equal(t, "int", idCol.Type)
|
||||
assert.True(t, idCol.IsPrimaryKey)
|
||||
assert.True(t, idCol.AutoIncrement)
|
||||
assert.True(t, idCol.NotNull)
|
||||
|
||||
// Check username column
|
||||
usernameCol, exists := usersTable.Columns["username"]
|
||||
require.True(t, exists)
|
||||
assert.Equal(t, "string", usernameCol.Type)
|
||||
assert.True(t, usernameCol.NotNull)
|
||||
assert.False(t, usernameCol.IsPrimaryKey)
|
||||
|
||||
// Check email column
|
||||
emailCol, exists := usersTable.Columns["email"]
|
||||
require.True(t, exists)
|
||||
assert.Equal(t, "string", emailCol.Type)
|
||||
assert.True(t, emailCol.NotNull)
|
||||
|
||||
// Check primary key constraint
|
||||
assert.Len(t, usersTable.Constraints, 1)
|
||||
pkConstraint, exists := usersTable.Constraints["users_pkey"]
|
||||
require.True(t, exists)
|
||||
assert.Equal(t, models.PrimaryKeyConstraint, pkConstraint.Type)
|
||||
assert.Equal(t, []string{"id"}, pkConstraint.Columns)
|
||||
|
||||
// Check indexes (should have unique index on email and username)
|
||||
assert.GreaterOrEqual(t, len(usersTable.Indexes), 1)
|
||||
}
|
||||
|
||||
func TestReader_ReadTable_Posts(t *testing.T) {
|
||||
dbPath := setupTestDatabase(t)
|
||||
defer os.Remove(dbPath)
|
||||
|
||||
options := &readers.ReaderOptions{
|
||||
FilePath: dbPath,
|
||||
}
|
||||
|
||||
reader := NewReader(options)
|
||||
db, err := reader.ReadDatabase()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, db)
|
||||
|
||||
// Find posts table
|
||||
var postsTable *models.Table
|
||||
for _, table := range db.Schemas[0].Tables {
|
||||
if table.Name == "posts" {
|
||||
postsTable = table
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
require.NotNil(t, postsTable)
|
||||
|
||||
// Check columns
|
||||
assert.Len(t, postsTable.Columns, 6)
|
||||
|
||||
// Check foreign key constraint
|
||||
hasForeignKey := false
|
||||
for _, constraint := range postsTable.Constraints {
|
||||
if constraint.Type == models.ForeignKeyConstraint {
|
||||
hasForeignKey = true
|
||||
assert.Equal(t, "users", constraint.ReferencedTable)
|
||||
assert.Equal(t, "CASCADE", constraint.OnDelete)
|
||||
}
|
||||
}
|
||||
assert.True(t, hasForeignKey, "Posts table should have a foreign key constraint")
|
||||
|
||||
// Check relationships
|
||||
assert.GreaterOrEqual(t, len(postsTable.Relationships), 1)
|
||||
|
||||
// Check indexes
|
||||
hasUserIdIndex := false
|
||||
for _, index := range postsTable.Indexes {
|
||||
if index.Name == "idx_posts_user_id" {
|
||||
hasUserIdIndex = true
|
||||
assert.Contains(t, index.Columns, "user_id")
|
||||
}
|
||||
}
|
||||
assert.True(t, hasUserIdIndex, "Posts table should have idx_posts_user_id index")
|
||||
}
|
||||
|
||||
func TestReader_ReadTable_Comments(t *testing.T) {
|
||||
dbPath := setupTestDatabase(t)
|
||||
defer os.Remove(dbPath)
|
||||
|
||||
options := &readers.ReaderOptions{
|
||||
FilePath: dbPath,
|
||||
}
|
||||
|
||||
reader := NewReader(options)
|
||||
db, err := reader.ReadDatabase()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, db)
|
||||
|
||||
// Find comments table
|
||||
var commentsTable *models.Table
|
||||
for _, table := range db.Schemas[0].Tables {
|
||||
if table.Name == "comments" {
|
||||
commentsTable = table
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
require.NotNil(t, commentsTable)
|
||||
|
||||
// Check foreign key constraints (should have 2)
|
||||
fkCount := 0
|
||||
for _, constraint := range commentsTable.Constraints {
|
||||
if constraint.Type == models.ForeignKeyConstraint {
|
||||
fkCount++
|
||||
}
|
||||
}
|
||||
assert.Equal(t, 2, fkCount, "Comments table should have 2 foreign key constraints")
|
||||
}
|
||||
|
||||
func TestReader_ReadSchema(t *testing.T) {
|
||||
dbPath := setupTestDatabase(t)
|
||||
defer os.Remove(dbPath)
|
||||
|
||||
options := &readers.ReaderOptions{
|
||||
FilePath: dbPath,
|
||||
}
|
||||
|
||||
reader := NewReader(options)
|
||||
schema, err := reader.ReadSchema()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, schema)
|
||||
assert.Equal(t, "main", schema.Name)
|
||||
assert.Len(t, schema.Tables, 3)
|
||||
assert.Len(t, schema.Views, 1)
|
||||
}
|
||||
|
||||
func TestReader_ReadTable(t *testing.T) {
|
||||
dbPath := setupTestDatabase(t)
|
||||
defer os.Remove(dbPath)
|
||||
|
||||
options := &readers.ReaderOptions{
|
||||
FilePath: dbPath,
|
||||
}
|
||||
|
||||
reader := NewReader(options)
|
||||
table, err := reader.ReadTable()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, table)
|
||||
assert.NotEmpty(t, table.Name)
|
||||
assert.NotEmpty(t, table.Columns)
|
||||
}
|
||||
|
||||
func TestReader_ConnectionString(t *testing.T) {
|
||||
dbPath := setupTestDatabase(t)
|
||||
defer os.Remove(dbPath)
|
||||
|
||||
options := &readers.ReaderOptions{
|
||||
ConnectionString: dbPath,
|
||||
}
|
||||
|
||||
reader := NewReader(options)
|
||||
db, err := reader.ReadDatabase()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, db)
|
||||
assert.Len(t, db.Schemas, 1)
|
||||
}
|
||||
|
||||
func TestReader_InvalidPath(t *testing.T) {
|
||||
options := &readers.ReaderOptions{
|
||||
FilePath: "/nonexistent/path/to/database.db",
|
||||
}
|
||||
|
||||
reader := NewReader(options)
|
||||
_, err := reader.ReadDatabase()
|
||||
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestReader_MissingPath(t *testing.T) {
|
||||
options := &readers.ReaderOptions{}
|
||||
|
||||
reader := NewReader(options)
|
||||
_, err := reader.ReadDatabase()
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "file path or connection string is required")
|
||||
}
|
||||
@@ -5,9 +5,11 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/readers"
|
||||
)
|
||||
|
||||
@@ -549,6 +551,41 @@ func (r *Reader) parseColumnOptions(decorator string, column *models.Column, tab
|
||||
}
|
||||
}
|
||||
|
||||
// Preserve explicit type modifiers from options where present.
|
||||
// Example: @Column({ type: 'varchar', length: 255 }) -> varchar(255)
|
||||
if column.Type != "" && !strings.Contains(column.Type, "(") {
|
||||
lengthRegex := regexp.MustCompile(`length:\s*(\d+)`)
|
||||
precisionRegex := regexp.MustCompile(`precision:\s*(\d+)`)
|
||||
scaleRegex := regexp.MustCompile(`scale:\s*(\d+)`)
|
||||
|
||||
baseType := strings.ToLower(strings.TrimSpace(column.Type))
|
||||
|
||||
if pgsql.SupportsLength(baseType) {
|
||||
if matches := lengthRegex.FindStringSubmatch(content); len(matches) == 2 {
|
||||
if n, err := strconv.Atoi(matches[1]); err == nil && n > 0 {
|
||||
column.Length = n
|
||||
column.Type = fmt.Sprintf("%s(%d)", column.Type, n)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if pgsql.SupportsPrecision(baseType) {
|
||||
if matches := precisionRegex.FindStringSubmatch(content); len(matches) == 2 {
|
||||
if p, err := strconv.Atoi(matches[1]); err == nil && p > 0 {
|
||||
column.Precision = p
|
||||
if sm := scaleRegex.FindStringSubmatch(content); len(sm) == 2 {
|
||||
if s, err := strconv.Atoi(sm[1]); err == nil && s >= 0 {
|
||||
column.Scale = s
|
||||
column.Type = fmt.Sprintf("%s(%d,%d)", column.Type, p, s)
|
||||
}
|
||||
} else {
|
||||
column.Type = fmt.Sprintf("%s(%d)", column.Type, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if strings.Contains(content, "nullable: true") || strings.Contains(content, "nullable:true") {
|
||||
column.NotNull = false
|
||||
}
|
||||
|
||||
60
pkg/readers/typeorm/reader_test.go
Normal file
60
pkg/readers/typeorm/reader_test.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package typeorm
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
)
|
||||
|
||||
func TestParseColumnOptions_PreservesTypeModifiers(t *testing.T) {
|
||||
reader := &Reader{}
|
||||
table := models.InitTable("users", "public")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
decorator string
|
||||
wantType string
|
||||
wantLength int
|
||||
wantPrecision int
|
||||
wantScale int
|
||||
}{
|
||||
{
|
||||
name: "varchar with length",
|
||||
decorator: `@Column({ type: 'varchar', length: 255 })`,
|
||||
wantType: "varchar(255)",
|
||||
wantLength: 255,
|
||||
},
|
||||
{
|
||||
name: "numeric with precision and scale",
|
||||
decorator: `@Column({ type: 'numeric', precision: 10, scale: 2 })`,
|
||||
wantType: "numeric(10,2)",
|
||||
wantPrecision: 10,
|
||||
wantScale: 2,
|
||||
},
|
||||
{
|
||||
name: "custom type with explicit modifier is preserved",
|
||||
decorator: `@Column({ type: 'vector(1536)' })`,
|
||||
wantType: "vector(1536)",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
col := models.InitColumn("sample", table.Name, table.Schema)
|
||||
reader.parseColumnOptions(tt.decorator, col, table)
|
||||
|
||||
if col.Type != tt.wantType {
|
||||
t.Fatalf("column type = %q, want %q", col.Type, tt.wantType)
|
||||
}
|
||||
if col.Length != tt.wantLength {
|
||||
t.Fatalf("column length = %d, want %d", col.Length, tt.wantLength)
|
||||
}
|
||||
if col.Precision != tt.wantPrecision {
|
||||
t.Fatalf("column precision = %d, want %d", col.Precision, tt.wantPrecision)
|
||||
}
|
||||
if col.Scale != tt.wantScale {
|
||||
t.Fatalf("column scale = %d, want %d", col.Scale, tt.wantScale)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
36
pkg/reflectutil/doc.go
Normal file
36
pkg/reflectutil/doc.go
Normal file
@@ -0,0 +1,36 @@
|
||||
// Package reflectutil provides reflection utilities for analyzing Go code structures.
|
||||
//
|
||||
// # Overview
|
||||
//
|
||||
// The reflectutil package offers helper functions for working with Go's reflection
|
||||
// capabilities, particularly for parsing Go struct definitions and extracting type
|
||||
// information. This is used by readers that parse ORM model files.
|
||||
//
|
||||
// # Features
|
||||
//
|
||||
// - Struct tag parsing and extraction
|
||||
// - Type information analysis
|
||||
// - Field metadata extraction
|
||||
// - ORM tag interpretation (GORM, Bun, etc.)
|
||||
//
|
||||
// # Usage
|
||||
//
|
||||
// This package is primarily used internally by readers like GORM and Bun to parse
|
||||
// Go struct definitions and convert them to database schema models.
|
||||
//
|
||||
// // Example: Parse struct tags
|
||||
// tags := reflectutil.ParseStructTags(field)
|
||||
// columnName := tags.Get("db")
|
||||
//
|
||||
// # Supported ORM Tags
|
||||
//
|
||||
// The package understands tag conventions from:
|
||||
// - GORM (gorm tag)
|
||||
// - Bun (bun tag)
|
||||
// - Standard database/sql (db tag)
|
||||
//
|
||||
// # Purpose
|
||||
//
|
||||
// This package enables RelSpec to read existing ORM models and convert them to
|
||||
// a unified schema representation for transformation to other formats.
|
||||
package reflectutil
|
||||
34
pkg/transform/doc.go
Normal file
34
pkg/transform/doc.go
Normal file
@@ -0,0 +1,34 @@
|
||||
// Package transform provides validation and transformation utilities for database models.
|
||||
//
|
||||
// # Overview
|
||||
//
|
||||
// The transform package contains a Transformer type that provides methods for validating
|
||||
// and normalizing database schemas. It ensures schema correctness and consistency across
|
||||
// different format conversions.
|
||||
//
|
||||
// # Features
|
||||
//
|
||||
// - Database validation (structure and naming conventions)
|
||||
// - Schema validation (completeness and integrity)
|
||||
// - Table validation (column definitions and constraints)
|
||||
// - Data type normalization
|
||||
//
|
||||
// # Usage
|
||||
//
|
||||
// transformer := transform.NewTransformer()
|
||||
// err := transformer.ValidateDatabase(db)
|
||||
// if err != nil {
|
||||
// log.Fatal("Invalid database schema:", err)
|
||||
// }
|
||||
//
|
||||
// # Validation Scope
|
||||
//
|
||||
// The transformer validates:
|
||||
// - Required fields presence
|
||||
// - Naming convention adherence
|
||||
// - Data type compatibility
|
||||
// - Constraint consistency
|
||||
// - Relationship integrity
|
||||
//
|
||||
// Note: Some validation methods are currently stubs and will be implemented as needed.
|
||||
package transform
|
||||
57
pkg/ui/doc.go
Normal file
57
pkg/ui/doc.go
Normal file
@@ -0,0 +1,57 @@
|
||||
// Package ui provides an interactive terminal user interface (TUI) for editing database schemas.
|
||||
//
|
||||
// # Overview
|
||||
//
|
||||
// The ui package implements a full-featured terminal-based schema editor using tview,
|
||||
// allowing users to visually create, modify, and manage database schemas without writing
|
||||
// code or SQL.
|
||||
//
|
||||
// # Features
|
||||
//
|
||||
// The schema editor supports:
|
||||
// - Database management: Edit name, description, and properties
|
||||
// - Schema management: Create, edit, delete schemas
|
||||
// - Table management: Create, edit, delete tables
|
||||
// - Column management: Add, modify, delete columns with full property support
|
||||
// - Relationship management: Define and edit table relationships
|
||||
// - Domain management: Organize tables into logical domains
|
||||
// - Import & merge: Combine schemas from multiple sources
|
||||
// - Save: Export to any supported format
|
||||
//
|
||||
// # Architecture
|
||||
//
|
||||
// The package is organized into several components:
|
||||
// - editor.go: Main editor and application lifecycle
|
||||
// - *_screens.go: UI screens for each entity type
|
||||
// - *_dataops.go: Business logic and data operations
|
||||
// - dialogs.go: Reusable dialog components
|
||||
// - load_save_screens.go: File I/O and format selection
|
||||
// - main_menu.go: Primary navigation menu
|
||||
//
|
||||
// # Usage
|
||||
//
|
||||
// editor := ui.NewSchemaEditor(database)
|
||||
// if err := editor.Run(); err != nil {
|
||||
// log.Fatal(err)
|
||||
// }
|
||||
//
|
||||
// Or with pre-configured load/save options:
|
||||
//
|
||||
// editor := ui.NewSchemaEditorWithConfigs(database, loadConfig, saveConfig)
|
||||
// if err := editor.Run(); err != nil {
|
||||
// log.Fatal(err)
|
||||
// }
|
||||
//
|
||||
// # Navigation
|
||||
//
|
||||
// - Arrow keys: Navigate between items
|
||||
// - Enter: Select/edit item
|
||||
// - Tab/Shift+Tab: Navigate between buttons
|
||||
// - Escape: Go back/cancel
|
||||
// - Letter shortcuts: Quick actions (e.g., 'n' for new, 'e' for edit, 'd' for delete)
|
||||
//
|
||||
// # Integration
|
||||
//
|
||||
// The editor integrates with all readers and writers, supporting load/save operations
|
||||
// for any format supported by RelSpec (DBML, PostgreSQL, GORM, Prisma, etc.).
|
||||
package ui
|
||||
115
pkg/ui/relation_dataops.go
Normal file
115
pkg/ui/relation_dataops.go
Normal file
@@ -0,0 +1,115 @@
|
||||
package ui
|
||||
|
||||
import (
	"sort"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
)
|
||||
|
||||
// Relationship data operations - business logic for relationship management
|
||||
|
||||
// CreateRelationship creates a new relationship and adds it to a table
|
||||
func (se *SchemaEditor) CreateRelationship(schemaIndex, tableIndex int, rel *models.Relationship) *models.Relationship {
|
||||
if schemaIndex < 0 || schemaIndex >= len(se.db.Schemas) {
|
||||
return nil
|
||||
}
|
||||
|
||||
schema := se.db.Schemas[schemaIndex]
|
||||
if tableIndex < 0 || tableIndex >= len(schema.Tables) {
|
||||
return nil
|
||||
}
|
||||
|
||||
table := schema.Tables[tableIndex]
|
||||
if table.Relationships == nil {
|
||||
table.Relationships = make(map[string]*models.Relationship)
|
||||
}
|
||||
|
||||
table.Relationships[rel.Name] = rel
|
||||
table.UpdateDate()
|
||||
return rel
|
||||
}
|
||||
|
||||
// UpdateRelationship updates an existing relationship
|
||||
func (se *SchemaEditor) UpdateRelationship(schemaIndex, tableIndex int, oldName string, rel *models.Relationship) bool {
|
||||
if schemaIndex < 0 || schemaIndex >= len(se.db.Schemas) {
|
||||
return false
|
||||
}
|
||||
|
||||
schema := se.db.Schemas[schemaIndex]
|
||||
if tableIndex < 0 || tableIndex >= len(schema.Tables) {
|
||||
return false
|
||||
}
|
||||
|
||||
table := schema.Tables[tableIndex]
|
||||
if table.Relationships == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Delete old entry if name changed
|
||||
if oldName != rel.Name {
|
||||
delete(table.Relationships, oldName)
|
||||
}
|
||||
|
||||
table.Relationships[rel.Name] = rel
|
||||
table.UpdateDate()
|
||||
return true
|
||||
}
|
||||
|
||||
// DeleteRelationship removes a relationship from a table
|
||||
func (se *SchemaEditor) DeleteRelationship(schemaIndex, tableIndex int, relName string) bool {
|
||||
if schemaIndex < 0 || schemaIndex >= len(se.db.Schemas) {
|
||||
return false
|
||||
}
|
||||
|
||||
schema := se.db.Schemas[schemaIndex]
|
||||
if tableIndex < 0 || tableIndex >= len(schema.Tables) {
|
||||
return false
|
||||
}
|
||||
|
||||
table := schema.Tables[tableIndex]
|
||||
if table.Relationships == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
delete(table.Relationships, relName)
|
||||
table.UpdateDate()
|
||||
return true
|
||||
}
|
||||
|
||||
// GetRelationship returns a relationship by name
|
||||
func (se *SchemaEditor) GetRelationship(schemaIndex, tableIndex int, relName string) *models.Relationship {
|
||||
if schemaIndex < 0 || schemaIndex >= len(se.db.Schemas) {
|
||||
return nil
|
||||
}
|
||||
|
||||
schema := se.db.Schemas[schemaIndex]
|
||||
if tableIndex < 0 || tableIndex >= len(schema.Tables) {
|
||||
return nil
|
||||
}
|
||||
|
||||
table := schema.Tables[tableIndex]
|
||||
if table.Relationships == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return table.Relationships[relName]
|
||||
}
|
||||
|
||||
// GetRelationshipNames returns all relationship names for a table
|
||||
func (se *SchemaEditor) GetRelationshipNames(schemaIndex, tableIndex int) []string {
|
||||
if schemaIndex < 0 || schemaIndex >= len(se.db.Schemas) {
|
||||
return nil
|
||||
}
|
||||
|
||||
schema := se.db.Schemas[schemaIndex]
|
||||
if tableIndex < 0 || tableIndex >= len(schema.Tables) {
|
||||
return nil
|
||||
}
|
||||
|
||||
table := schema.Tables[tableIndex]
|
||||
if table.Relationships == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
names := make([]string, 0, len(table.Relationships))
|
||||
for name := range table.Relationships {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
486
pkg/ui/relation_screens.go
Normal file
486
pkg/ui/relation_screens.go
Normal file
@@ -0,0 +1,486 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/gdamore/tcell/v2"
|
||||
"github.com/rivo/tview"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
)
|
||||
|
||||
// showRelationshipList displays all relationships for a table
|
||||
func (se *SchemaEditor) showRelationshipList(schemaIndex, tableIndex int) {
|
||||
table := se.GetTable(schemaIndex, tableIndex)
|
||||
if table == nil {
|
||||
return
|
||||
}
|
||||
|
||||
flex := tview.NewFlex().SetDirection(tview.FlexRow)
|
||||
|
||||
// Title
|
||||
title := tview.NewTextView().
|
||||
SetText(fmt.Sprintf("[::b]Relationships for Table: %s", table.Name)).
|
||||
SetDynamicColors(true).
|
||||
SetTextAlign(tview.AlignCenter)
|
||||
|
||||
// Create relationships table
|
||||
relTable := tview.NewTable().SetBorders(true).SetSelectable(true, false).SetFixed(1, 0)
|
||||
|
||||
// Add header row
|
||||
headers := []string{"Name", "Type", "From Columns", "To Table", "To Columns", "Description"}
|
||||
headerWidths := []int{20, 15, 20, 20, 20}
|
||||
for i, header := range headers {
|
||||
padding := ""
|
||||
if i < len(headerWidths) {
|
||||
padding = strings.Repeat(" ", headerWidths[i]-len(header))
|
||||
}
|
||||
cell := tview.NewTableCell(header + padding).
|
||||
SetTextColor(tcell.ColorYellow).
|
||||
SetSelectable(false).
|
||||
SetAlign(tview.AlignLeft)
|
||||
relTable.SetCell(0, i, cell)
|
||||
}
|
||||
|
||||
// Get relationship names
|
||||
relNames := se.GetRelationshipNames(schemaIndex, tableIndex)
|
||||
for row, relName := range relNames {
|
||||
rel := table.Relationships[relName]
|
||||
|
||||
// Name
|
||||
nameStr := fmt.Sprintf("%-20s", rel.Name)
|
||||
nameCell := tview.NewTableCell(nameStr).SetSelectable(true)
|
||||
relTable.SetCell(row+1, 0, nameCell)
|
||||
|
||||
// Type
|
||||
typeStr := fmt.Sprintf("%-15s", string(rel.Type))
|
||||
typeCell := tview.NewTableCell(typeStr).SetSelectable(true)
|
||||
relTable.SetCell(row+1, 1, typeCell)
|
||||
|
||||
// From Columns
|
||||
fromColsStr := strings.Join(rel.FromColumns, ", ")
|
||||
fromColsStr = fmt.Sprintf("%-20s", fromColsStr)
|
||||
fromColsCell := tview.NewTableCell(fromColsStr).SetSelectable(true)
|
||||
relTable.SetCell(row+1, 2, fromColsCell)
|
||||
|
||||
// To Table
|
||||
toTableStr := rel.ToTable
|
||||
if rel.ToSchema != "" && rel.ToSchema != table.Schema {
|
||||
toTableStr = rel.ToSchema + "." + rel.ToTable
|
||||
}
|
||||
toTableStr = fmt.Sprintf("%-20s", toTableStr)
|
||||
toTableCell := tview.NewTableCell(toTableStr).SetSelectable(true)
|
||||
relTable.SetCell(row+1, 3, toTableCell)
|
||||
|
||||
// To Columns
|
||||
toColsStr := strings.Join(rel.ToColumns, ", ")
|
||||
toColsStr = fmt.Sprintf("%-20s", toColsStr)
|
||||
toColsCell := tview.NewTableCell(toColsStr).SetSelectable(true)
|
||||
relTable.SetCell(row+1, 4, toColsCell)
|
||||
|
||||
// Description
|
||||
descCell := tview.NewTableCell(rel.Description).SetSelectable(true)
|
||||
relTable.SetCell(row+1, 5, descCell)
|
||||
}
|
||||
|
||||
relTable.SetTitle(" Relationships ").SetBorder(true).SetTitleAlign(tview.AlignLeft)
|
||||
|
||||
// Action buttons
|
||||
btnFlex := tview.NewFlex()
|
||||
btnNew := tview.NewButton("New Relationship [n]").SetSelectedFunc(func() {
|
||||
se.showNewRelationshipDialog(schemaIndex, tableIndex)
|
||||
})
|
||||
btnEdit := tview.NewButton("Edit [e]").SetSelectedFunc(func() {
|
||||
row, _ := relTable.GetSelection()
|
||||
if row > 0 && row <= len(relNames) {
|
||||
relName := relNames[row-1]
|
||||
se.showEditRelationshipDialog(schemaIndex, tableIndex, relName)
|
||||
}
|
||||
})
|
||||
btnDelete := tview.NewButton("Delete [d]").SetSelectedFunc(func() {
|
||||
row, _ := relTable.GetSelection()
|
||||
if row > 0 && row <= len(relNames) {
|
||||
relName := relNames[row-1]
|
||||
se.showDeleteRelationshipConfirm(schemaIndex, tableIndex, relName)
|
||||
}
|
||||
})
|
||||
btnBack := tview.NewButton("Back [b]").SetSelectedFunc(func() {
|
||||
se.pages.RemovePage("relationships")
|
||||
se.pages.SwitchToPage("table-editor")
|
||||
})
|
||||
|
||||
// Set up button navigation
|
||||
btnNew.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
|
||||
if event.Key() == tcell.KeyBacktab {
|
||||
se.app.SetFocus(relTable)
|
||||
return nil
|
||||
}
|
||||
if event.Key() == tcell.KeyTab {
|
||||
se.app.SetFocus(btnEdit)
|
||||
return nil
|
||||
}
|
||||
return event
|
||||
})
|
||||
|
||||
btnEdit.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
|
||||
if event.Key() == tcell.KeyBacktab {
|
||||
se.app.SetFocus(btnNew)
|
||||
return nil
|
||||
}
|
||||
if event.Key() == tcell.KeyTab {
|
||||
se.app.SetFocus(btnDelete)
|
||||
return nil
|
||||
}
|
||||
return event
|
||||
})
|
||||
|
||||
btnDelete.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
|
||||
if event.Key() == tcell.KeyBacktab {
|
||||
se.app.SetFocus(btnEdit)
|
||||
return nil
|
||||
}
|
||||
if event.Key() == tcell.KeyTab {
|
||||
se.app.SetFocus(btnBack)
|
||||
return nil
|
||||
}
|
||||
return event
|
||||
})
|
||||
|
||||
btnBack.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
|
||||
if event.Key() == tcell.KeyBacktab {
|
||||
se.app.SetFocus(btnDelete)
|
||||
return nil
|
||||
}
|
||||
if event.Key() == tcell.KeyTab {
|
||||
se.app.SetFocus(relTable)
|
||||
return nil
|
||||
}
|
||||
return event
|
||||
})
|
||||
|
||||
btnFlex.AddItem(btnNew, 0, 1, true).
|
||||
AddItem(btnEdit, 0, 1, false).
|
||||
AddItem(btnDelete, 0, 1, false).
|
||||
AddItem(btnBack, 0, 1, false)
|
||||
|
||||
relTable.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
|
||||
if event.Key() == tcell.KeyEscape {
|
||||
se.pages.RemovePage("relationships")
|
||||
se.pages.SwitchToPage("table-editor")
|
||||
return nil
|
||||
}
|
||||
if event.Key() == tcell.KeyTab {
|
||||
se.app.SetFocus(btnNew)
|
||||
return nil
|
||||
}
|
||||
if event.Key() == tcell.KeyEnter {
|
||||
row, _ := relTable.GetSelection()
|
||||
if row > 0 && row <= len(relNames) {
|
||||
relName := relNames[row-1]
|
||||
se.showEditRelationshipDialog(schemaIndex, tableIndex, relName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if event.Rune() == 'n' {
|
||||
se.showNewRelationshipDialog(schemaIndex, tableIndex)
|
||||
return nil
|
||||
}
|
||||
if event.Rune() == 'e' {
|
||||
row, _ := relTable.GetSelection()
|
||||
if row > 0 && row <= len(relNames) {
|
||||
relName := relNames[row-1]
|
||||
se.showEditRelationshipDialog(schemaIndex, tableIndex, relName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if event.Rune() == 'd' {
|
||||
row, _ := relTable.GetSelection()
|
||||
if row > 0 && row <= len(relNames) {
|
||||
relName := relNames[row-1]
|
||||
se.showDeleteRelationshipConfirm(schemaIndex, tableIndex, relName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if event.Rune() == 'b' {
|
||||
se.pages.RemovePage("relationships")
|
||||
se.pages.SwitchToPage("table-editor")
|
||||
return nil
|
||||
}
|
||||
return event
|
||||
})
|
||||
|
||||
flex.AddItem(title, 1, 0, false).
|
||||
AddItem(relTable, 0, 1, true).
|
||||
AddItem(btnFlex, 1, 0, false)
|
||||
|
||||
se.pages.AddPage("relationships", flex, true, true)
|
||||
}
|
||||
|
||||
// showNewRelationshipDialog shows dialog to create a new relationship
|
||||
func (se *SchemaEditor) showNewRelationshipDialog(schemaIndex, tableIndex int) {
|
||||
table := se.GetTable(schemaIndex, tableIndex)
|
||||
if table == nil {
|
||||
return
|
||||
}
|
||||
|
||||
form := tview.NewForm()
|
||||
|
||||
// Collect all tables for dropdown
|
||||
var allTables []string
|
||||
var tableMap []struct{ schemaIdx, tableIdx int }
|
||||
for si, schema := range se.db.Schemas {
|
||||
for ti, t := range schema.Tables {
|
||||
tableName := t.Name
|
||||
if schema.Name != table.Schema {
|
||||
tableName = schema.Name + "." + t.Name
|
||||
}
|
||||
allTables = append(allTables, tableName)
|
||||
tableMap = append(tableMap, struct{ schemaIdx, tableIdx int }{si, ti})
|
||||
}
|
||||
}
|
||||
|
||||
relName := ""
|
||||
relType := models.OneToMany
|
||||
fromColumns := ""
|
||||
toColumns := ""
|
||||
description := ""
|
||||
selectedTableIdx := 0
|
||||
|
||||
form.AddInputField("Name", "", 40, nil, func(value string) {
|
||||
relName = value
|
||||
})
|
||||
|
||||
form.AddDropDown("Type", []string{
|
||||
string(models.OneToOne),
|
||||
string(models.OneToMany),
|
||||
string(models.ManyToMany),
|
||||
}, 1, func(option string, optionIndex int) {
|
||||
relType = models.RelationType(option)
|
||||
})
|
||||
|
||||
form.AddInputField("From Columns (comma-separated)", "", 40, nil, func(value string) {
|
||||
fromColumns = value
|
||||
})
|
||||
|
||||
form.AddDropDown("To Table", allTables, 0, func(option string, optionIndex int) {
|
||||
selectedTableIdx = optionIndex
|
||||
})
|
||||
|
||||
form.AddInputField("To Columns (comma-separated)", "", 40, nil, func(value string) {
|
||||
toColumns = value
|
||||
})
|
||||
|
||||
form.AddInputField("Description", "", 60, nil, func(value string) {
|
||||
description = value
|
||||
})
|
||||
|
||||
form.AddButton("Save", func() {
|
||||
if relName == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// Parse columns
|
||||
fromCols := strings.Split(fromColumns, ",")
|
||||
for i := range fromCols {
|
||||
fromCols[i] = strings.TrimSpace(fromCols[i])
|
||||
}
|
||||
|
||||
toCols := strings.Split(toColumns, ",")
|
||||
for i := range toCols {
|
||||
toCols[i] = strings.TrimSpace(toCols[i])
|
||||
}
|
||||
|
||||
// Get target table
|
||||
targetSchema := se.db.Schemas[tableMap[selectedTableIdx].schemaIdx]
|
||||
targetTable := targetSchema.Tables[tableMap[selectedTableIdx].tableIdx]
|
||||
|
||||
rel := models.InitRelationship(relName, relType)
|
||||
rel.FromTable = table.Name
|
||||
rel.FromSchema = table.Schema
|
||||
rel.FromColumns = fromCols
|
||||
rel.ToTable = targetTable.Name
|
||||
rel.ToSchema = targetTable.Schema
|
||||
rel.ToColumns = toCols
|
||||
rel.Description = description
|
||||
|
||||
se.CreateRelationship(schemaIndex, tableIndex, rel)
|
||||
|
||||
se.pages.RemovePage("new-relationship")
|
||||
se.pages.RemovePage("relationships")
|
||||
se.showRelationshipList(schemaIndex, tableIndex)
|
||||
})
|
||||
|
||||
form.AddButton("Back", func() {
|
||||
se.pages.RemovePage("new-relationship")
|
||||
})
|
||||
|
||||
form.SetBorder(true).SetTitle(" New Relationship ").SetTitleAlign(tview.AlignLeft)
|
||||
form.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
|
||||
if event.Key() == tcell.KeyEscape {
|
||||
se.pages.RemovePage("new-relationship")
|
||||
return nil
|
||||
}
|
||||
return event
|
||||
})
|
||||
|
||||
se.pages.AddPage("new-relationship", form, true, true)
|
||||
}
|
||||
|
||||
// showEditRelationshipDialog shows dialog to edit a relationship
|
||||
func (se *SchemaEditor) showEditRelationshipDialog(schemaIndex, tableIndex int, relName string) {
|
||||
table := se.GetTable(schemaIndex, tableIndex)
|
||||
if table == nil {
|
||||
return
|
||||
}
|
||||
|
||||
rel := se.GetRelationship(schemaIndex, tableIndex, relName)
|
||||
if rel == nil {
|
||||
return
|
||||
}
|
||||
|
||||
form := tview.NewForm()
|
||||
|
||||
// Collect all tables for dropdown
|
||||
var allTables []string
|
||||
var tableMap []struct{ schemaIdx, tableIdx int }
|
||||
selectedTableIdx := 0
|
||||
for si, schema := range se.db.Schemas {
|
||||
for ti, t := range schema.Tables {
|
||||
tableName := t.Name
|
||||
if schema.Name != table.Schema {
|
||||
tableName = schema.Name + "." + t.Name
|
||||
}
|
||||
allTables = append(allTables, tableName)
|
||||
tableMap = append(tableMap, struct{ schemaIdx, tableIdx int }{si, ti})
|
||||
|
||||
// Check if this is the current target table
|
||||
if t.Name == rel.ToTable && schema.Name == rel.ToSchema {
|
||||
selectedTableIdx = len(allTables) - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newName := rel.Name
|
||||
relType := rel.Type
|
||||
fromColumns := strings.Join(rel.FromColumns, ", ")
|
||||
toColumns := strings.Join(rel.ToColumns, ", ")
|
||||
description := rel.Description
|
||||
|
||||
form.AddInputField("Name", rel.Name, 40, nil, func(value string) {
|
||||
newName = value
|
||||
})
|
||||
|
||||
// Find initial type index
|
||||
typeIdx := 1 // OneToMany default
|
||||
typeOptions := []string{
|
||||
string(models.OneToOne),
|
||||
string(models.OneToMany),
|
||||
string(models.ManyToMany),
|
||||
}
|
||||
for i, opt := range typeOptions {
|
||||
if opt == string(rel.Type) {
|
||||
typeIdx = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
form.AddDropDown("Type", typeOptions, typeIdx, func(option string, optionIndex int) {
|
||||
relType = models.RelationType(option)
|
||||
})
|
||||
|
||||
form.AddInputField("From Columns (comma-separated)", fromColumns, 40, nil, func(value string) {
|
||||
fromColumns = value
|
||||
})
|
||||
|
||||
form.AddDropDown("To Table", allTables, selectedTableIdx, func(option string, optionIndex int) {
|
||||
selectedTableIdx = optionIndex
|
||||
})
|
||||
|
||||
form.AddInputField("To Columns (comma-separated)", toColumns, 40, nil, func(value string) {
|
||||
toColumns = value
|
||||
})
|
||||
|
||||
form.AddInputField("Description", rel.Description, 60, nil, func(value string) {
|
||||
description = value
|
||||
})
|
||||
|
||||
form.AddButton("Save", func() {
|
||||
if newName == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// Parse columns
|
||||
fromCols := strings.Split(fromColumns, ",")
|
||||
for i := range fromCols {
|
||||
fromCols[i] = strings.TrimSpace(fromCols[i])
|
||||
}
|
||||
|
||||
toCols := strings.Split(toColumns, ",")
|
||||
for i := range toCols {
|
||||
toCols[i] = strings.TrimSpace(toCols[i])
|
||||
}
|
||||
|
||||
// Get target table
|
||||
targetSchema := se.db.Schemas[tableMap[selectedTableIdx].schemaIdx]
|
||||
targetTable := targetSchema.Tables[tableMap[selectedTableIdx].tableIdx]
|
||||
|
||||
updatedRel := models.InitRelationship(newName, relType)
|
||||
updatedRel.FromTable = table.Name
|
||||
updatedRel.FromSchema = table.Schema
|
||||
updatedRel.FromColumns = fromCols
|
||||
updatedRel.ToTable = targetTable.Name
|
||||
updatedRel.ToSchema = targetTable.Schema
|
||||
updatedRel.ToColumns = toCols
|
||||
updatedRel.Description = description
|
||||
updatedRel.GUID = rel.GUID
|
||||
|
||||
se.UpdateRelationship(schemaIndex, tableIndex, relName, updatedRel)
|
||||
|
||||
se.pages.RemovePage("edit-relationship")
|
||||
se.pages.RemovePage("relationships")
|
||||
se.showRelationshipList(schemaIndex, tableIndex)
|
||||
})
|
||||
|
||||
form.AddButton("Back", func() {
|
||||
se.pages.RemovePage("edit-relationship")
|
||||
})
|
||||
|
||||
form.SetBorder(true).SetTitle(" Edit Relationship ").SetTitleAlign(tview.AlignLeft)
|
||||
form.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
|
||||
if event.Key() == tcell.KeyEscape {
|
||||
se.pages.RemovePage("edit-relationship")
|
||||
return nil
|
||||
}
|
||||
return event
|
||||
})
|
||||
|
||||
se.pages.AddPage("edit-relationship", form, true, true)
|
||||
}
|
||||
|
||||
// showDeleteRelationshipConfirm shows confirmation dialog for deleting a relationship
|
||||
func (se *SchemaEditor) showDeleteRelationshipConfirm(schemaIndex, tableIndex int, relName string) {
|
||||
modal := tview.NewModal().
|
||||
SetText(fmt.Sprintf("Delete relationship '%s'? This action cannot be undone.", relName)).
|
||||
AddButtons([]string{"Cancel", "Delete"}).
|
||||
SetDoneFunc(func(buttonIndex int, buttonLabel string) {
|
||||
if buttonLabel == "Delete" {
|
||||
se.DeleteRelationship(schemaIndex, tableIndex, relName)
|
||||
se.pages.RemovePage("delete-relationship-confirm")
|
||||
se.pages.RemovePage("relationships")
|
||||
se.showRelationshipList(schemaIndex, tableIndex)
|
||||
} else {
|
||||
se.pages.RemovePage("delete-relationship-confirm")
|
||||
}
|
||||
})
|
||||
|
||||
modal.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
|
||||
if event.Key() == tcell.KeyEscape {
|
||||
se.pages.RemovePage("delete-relationship-confirm")
|
||||
return nil
|
||||
}
|
||||
return event
|
||||
})
|
||||
|
||||
se.pages.AddAndSwitchToPage("delete-relationship-confirm", modal, true)
|
||||
}
|
||||
@@ -270,6 +270,9 @@ func (se *SchemaEditor) showTableEditor(schemaIndex, tableIndex int, table *mode
|
||||
se.showColumnEditor(schemaIndex, tableIndex, row-1, column)
|
||||
}
|
||||
})
|
||||
btnRelations := tview.NewButton("Relations [r]").SetSelectedFunc(func() {
|
||||
se.showRelationshipList(schemaIndex, tableIndex)
|
||||
})
|
||||
btnDelTable := tview.NewButton("Delete Table [d]").SetSelectedFunc(func() {
|
||||
se.showDeleteTableConfirm(schemaIndex, tableIndex)
|
||||
})
|
||||
@@ -308,6 +311,18 @@ func (se *SchemaEditor) showTableEditor(schemaIndex, tableIndex int, table *mode
|
||||
se.app.SetFocus(btnEditColumn)
|
||||
return nil
|
||||
}
|
||||
if event.Key() == tcell.KeyTab {
|
||||
se.app.SetFocus(btnRelations)
|
||||
return nil
|
||||
}
|
||||
return event
|
||||
})
|
||||
|
||||
btnRelations.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
|
||||
if event.Key() == tcell.KeyBacktab {
|
||||
se.app.SetFocus(btnEditTable)
|
||||
return nil
|
||||
}
|
||||
if event.Key() == tcell.KeyTab {
|
||||
se.app.SetFocus(btnDelTable)
|
||||
return nil
|
||||
@@ -317,7 +332,7 @@ func (se *SchemaEditor) showTableEditor(schemaIndex, tableIndex int, table *mode
|
||||
|
||||
btnDelTable.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
|
||||
if event.Key() == tcell.KeyBacktab {
|
||||
se.app.SetFocus(btnEditTable)
|
||||
se.app.SetFocus(btnRelations)
|
||||
return nil
|
||||
}
|
||||
if event.Key() == tcell.KeyTab {
|
||||
@@ -342,6 +357,7 @@ func (se *SchemaEditor) showTableEditor(schemaIndex, tableIndex int, table *mode
|
||||
btnFlex.AddItem(btnNewCol, 0, 1, true).
|
||||
AddItem(btnEditColumn, 0, 1, false).
|
||||
AddItem(btnEditTable, 0, 1, false).
|
||||
AddItem(btnRelations, 0, 1, false).
|
||||
AddItem(btnDelTable, 0, 1, false).
|
||||
AddItem(btnBack, 0, 1, false)
|
||||
|
||||
@@ -373,6 +389,10 @@ func (se *SchemaEditor) showTableEditor(schemaIndex, tableIndex int, table *mode
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if event.Rune() == 'r' {
|
||||
se.showRelationshipList(schemaIndex, tableIndex)
|
||||
return nil
|
||||
}
|
||||
if event.Rune() == 'b' {
|
||||
se.pages.RemovePage("table-editor")
|
||||
se.pages.SwitchToPage("schema-editor")
|
||||
|
||||
@@ -110,8 +110,7 @@ func NewModelData(table *models.Table, schema string, typeMapper *TypeMapper, fl
|
||||
tableName := writers.QualifiedTableName(schema, table.Name, flattenSchema)
|
||||
|
||||
// Generate model name: Model + Schema + Table (all PascalCase)
|
||||
singularTable := Singularize(table.Name)
|
||||
tablePart := SnakeCaseToPascalCase(singularTable)
|
||||
tablePart := SnakeCaseToPascalCase(table.Name)
|
||||
|
||||
// Include schema name in model name
|
||||
var modelName string
|
||||
@@ -217,6 +216,21 @@ func resolveFieldNameCollision(fieldName string) string {
|
||||
return fieldName
|
||||
}
|
||||
|
||||
// sortConstraints sorts constraints by sequence, then by name
|
||||
func sortConstraints(constraints map[string]*models.Constraint) []*models.Constraint {
|
||||
result := make([]*models.Constraint, 0, len(constraints))
|
||||
for _, c := range constraints {
|
||||
result = append(result, c)
|
||||
}
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
if result[i].Sequence > 0 && result[j].Sequence > 0 {
|
||||
return result[i].Sequence < result[j].Sequence
|
||||
}
|
||||
return result[i].Name < result[j].Name
|
||||
})
|
||||
return result
|
||||
}
|
||||
|
||||
// sortColumns sorts columns by sequence, then by name
|
||||
func sortColumns(columns map[string]*models.Column) []*models.Column {
|
||||
result := make([]*models.Column, 0, len(columns))
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||
)
|
||||
|
||||
@@ -39,14 +40,7 @@ func (tm *TypeMapper) SQLTypeToGoType(sqlType string, notNull bool) string {
|
||||
|
||||
// extractBaseType extracts the base type from a SQL type string
|
||||
func (tm *TypeMapper) extractBaseType(sqlType string) string {
|
||||
sqlType = strings.ToLower(strings.TrimSpace(sqlType))
|
||||
|
||||
// Remove everything after '('
|
||||
if idx := strings.Index(sqlType, "("); idx > 0 {
|
||||
sqlType = sqlType[:idx]
|
||||
}
|
||||
|
||||
return sqlType
|
||||
return pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(sqlType))
|
||||
}
|
||||
|
||||
// isSimpleType checks if a type should use base Go type when NOT NULL
|
||||
@@ -62,6 +56,17 @@ func (tm *TypeMapper) isSimpleType(sqlType string) bool {
|
||||
return simpleTypes[sqlType]
|
||||
}
|
||||
|
||||
// isSerialType checks if a SQL type is a serial type (auto-incrementing)
|
||||
func (tm *TypeMapper) isSerialType(sqlType string) bool {
|
||||
baseType := tm.extractBaseType(sqlType)
|
||||
serialTypes := map[string]bool{
|
||||
"serial": true,
|
||||
"bigserial": true,
|
||||
"smallserial": true,
|
||||
}
|
||||
return serialTypes[baseType]
|
||||
}
|
||||
|
||||
// baseGoType returns the base Go type for a SQL type (not null, simple types only)
|
||||
func (tm *TypeMapper) baseGoType(sqlType string) string {
|
||||
typeMap := map[string]string{
|
||||
@@ -122,10 +127,10 @@ func (tm *TypeMapper) bunGoType(sqlType string) string {
|
||||
"decimal": tm.sqlTypesAlias + ".SqlFloat64",
|
||||
|
||||
// Date/Time types
|
||||
"timestamp": tm.sqlTypesAlias + ".SqlTime",
|
||||
"timestamp without time zone": tm.sqlTypesAlias + ".SqlTime",
|
||||
"timestamp with time zone": tm.sqlTypesAlias + ".SqlTime",
|
||||
"timestamptz": tm.sqlTypesAlias + ".SqlTime",
|
||||
"timestamp": tm.sqlTypesAlias + ".SqlTimeStamp",
|
||||
"timestamp without time zone": tm.sqlTypesAlias + ".SqlTimeStamp",
|
||||
"timestamp with time zone": tm.sqlTypesAlias + ".SqlTimeStamp",
|
||||
"timestamptz": tm.sqlTypesAlias + ".SqlTimeStamp",
|
||||
"date": tm.sqlTypesAlias + ".SqlDate",
|
||||
"time": tm.sqlTypesAlias + ".SqlTime",
|
||||
"time without time zone": tm.sqlTypesAlias + ".SqlTime",
|
||||
@@ -173,9 +178,10 @@ func (tm *TypeMapper) BuildBunTag(column *models.Column, table *models.Table) st
|
||||
if column.Type != "" {
|
||||
// Sanitize type to remove backticks
|
||||
typeStr := writers.SanitizeStructTagValue(column.Type)
|
||||
if column.Length > 0 {
|
||||
hasExplicitTypeModifier := pgsql.HasExplicitTypeModifier(typeStr)
|
||||
if !hasExplicitTypeModifier && column.Length > 0 {
|
||||
typeStr = fmt.Sprintf("%s(%d)", typeStr, column.Length)
|
||||
} else if column.Precision > 0 {
|
||||
} else if !hasExplicitTypeModifier && column.Precision > 0 {
|
||||
if column.Scale > 0 {
|
||||
typeStr = fmt.Sprintf("%s(%d,%d)", typeStr, column.Precision, column.Scale)
|
||||
} else {
|
||||
@@ -190,10 +196,15 @@ func (tm *TypeMapper) BuildBunTag(column *models.Column, table *models.Table) st
|
||||
parts = append(parts, "pk")
|
||||
}
|
||||
|
||||
// Auto increment (for serial types or explicit auto_increment)
|
||||
if column.AutoIncrement || tm.isSerialType(column.Type) {
|
||||
parts = append(parts, "autoincrement")
|
||||
}
|
||||
|
||||
// Default value
|
||||
if column.Default != nil {
|
||||
// Sanitize default value to remove backticks
|
||||
safeDefault := writers.SanitizeStructTagValue(fmt.Sprintf("%v", column.Default))
|
||||
// Sanitize default value to remove backticks, then quote based on column type
|
||||
safeDefault := writers.QuoteDefaultValue(writers.SanitizeStructTagValue(fmt.Sprintf("%v", column.Default)), column.Type)
|
||||
parts = append(parts, fmt.Sprintf("default:%s", safeDefault))
|
||||
}
|
||||
|
||||
@@ -251,7 +262,15 @@ func (tm *TypeMapper) BuildRelationshipTag(constraint *models.Constraint, relTyp
|
||||
if len(constraint.Columns) > 0 && len(constraint.ReferencedColumns) > 0 {
|
||||
localCol := constraint.Columns[0]
|
||||
foreignCol := constraint.ReferencedColumns[0]
|
||||
parts = append(parts, fmt.Sprintf("join:%s=%s", localCol, foreignCol))
|
||||
|
||||
// For has-many relationships, swap the columns
|
||||
// has-one: join:fk_in_this_table=pk_in_other_table
|
||||
// has-many: join:pk_in_this_table=fk_in_other_table
|
||||
if relType == "has-many" {
|
||||
parts = append(parts, fmt.Sprintf("join:%s=%s", foreignCol, localCol))
|
||||
} else {
|
||||
parts = append(parts, fmt.Sprintf("join:%s=%s", localCol, foreignCol))
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(parts, ",")
|
||||
|
||||
@@ -242,7 +242,7 @@ func (w *Writer) addRelationshipFields(modelData *ModelData, table *models.Table
|
||||
usedFieldNames := make(map[string]int)
|
||||
|
||||
// For each foreign key in this table, add a belongs-to/has-one relationship
|
||||
for _, constraint := range table.Constraints {
|
||||
for _, constraint := range sortConstraints(table.Constraints) {
|
||||
if constraint.Type != models.ForeignKeyConstraint {
|
||||
continue
|
||||
}
|
||||
@@ -275,7 +275,7 @@ func (w *Writer) addRelationshipFields(modelData *ModelData, table *models.Table
|
||||
continue // Skip self
|
||||
}
|
||||
|
||||
for _, constraint := range otherTable.Constraints {
|
||||
for _, constraint := range sortConstraints(otherTable.Constraints) {
|
||||
if constraint.Type != models.ForeignKeyConstraint {
|
||||
continue
|
||||
}
|
||||
@@ -318,8 +318,7 @@ func (w *Writer) findTable(schemaName, tableName string, db *models.Database) *m
|
||||
|
||||
// getModelName generates the model name from schema and table name
|
||||
func (w *Writer) getModelName(schemaName, tableName string) string {
|
||||
singular := Singularize(tableName)
|
||||
tablePart := SnakeCaseToPascalCase(singular)
|
||||
tablePart := SnakeCaseToPascalCase(tableName)
|
||||
|
||||
// Include schema name in model name
|
||||
var modelName string
|
||||
|
||||
@@ -66,7 +66,7 @@ func TestWriter_WriteTable(t *testing.T) {
|
||||
// Verify key elements are present
|
||||
expectations := []string{
|
||||
"package models",
|
||||
"type ModelPublicUser struct",
|
||||
"type ModelPublicUsers struct",
|
||||
"bun.BaseModel",
|
||||
"table:public.users",
|
||||
"alias:users",
|
||||
@@ -78,9 +78,9 @@ func TestWriter_WriteTable(t *testing.T) {
|
||||
"resolvespec_common.SqlTime",
|
||||
"bun:\"id",
|
||||
"bun:\"email",
|
||||
"func (m ModelPublicUser) TableName() string",
|
||||
"func (m ModelPublicUsers) TableName() string",
|
||||
"return \"public.users\"",
|
||||
"func (m ModelPublicUser) GetID() int64",
|
||||
"func (m ModelPublicUsers) GetID() int64",
|
||||
}
|
||||
|
||||
for _, expected := range expectations {
|
||||
@@ -90,8 +90,8 @@ func TestWriter_WriteTable(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify Bun-specific elements
|
||||
if !strings.Contains(generated, "bun:\"id,type:bigint,pk,") {
|
||||
t.Errorf("Missing Bun-style primary key tag")
|
||||
if !strings.Contains(generated, "bun:\"id,type:bigint,pk,autoincrement,") {
|
||||
t.Errorf("Missing Bun-style primary key tag with autoincrement")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -308,14 +308,20 @@ func TestWriter_MultipleReferencesToSameTable(t *testing.T) {
|
||||
filepointerStr := string(filepointerContent)
|
||||
|
||||
// Should have two different has-many relationships with unique names
|
||||
hasManyExpectations := []string{
|
||||
"RelRIDFilepointerRequestOrgAPIEvents", // Has many via rid_filepointer_request
|
||||
"RelRIDFilepointerResponseOrgAPIEvents", // Has many via rid_filepointer_response
|
||||
hasManyExpectations := []struct {
|
||||
fieldName string
|
||||
tag string
|
||||
}{
|
||||
{"RelRIDFilepointerRequestOrgAPIEvents", "join:id_filepointer=rid_filepointer_request"}, // Has many via rid_filepointer_request
|
||||
{"RelRIDFilepointerResponseOrgAPIEvents", "join:id_filepointer=rid_filepointer_response"}, // Has many via rid_filepointer_response
|
||||
}
|
||||
|
||||
for _, exp := range hasManyExpectations {
|
||||
if !strings.Contains(filepointerStr, exp) {
|
||||
t.Errorf("Missing has-many relationship field: %s\nGenerated:\n%s", exp, filepointerStr)
|
||||
if !strings.Contains(filepointerStr, exp.fieldName) {
|
||||
t.Errorf("Missing has-many relationship field: %s\nGenerated:\n%s", exp.fieldName, filepointerStr)
|
||||
}
|
||||
if !strings.Contains(filepointerStr, exp.tag) {
|
||||
t.Errorf("Missing has-many relationship join tag: %s\nGenerated:\n%s", exp.tag, filepointerStr)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -455,10 +461,10 @@ func TestWriter_MultipleHasManyRelationships(t *testing.T) {
|
||||
|
||||
// Verify all has-many relationships have unique names
|
||||
hasManyExpectations := []string{
|
||||
"RelRIDAPIProviderOrgLogins", // Has many via Login
|
||||
"RelRIDAPIProviderOrgLogins", // Has many via Login
|
||||
"RelRIDAPIProviderOrgFilepointers", // Has many via Filepointer
|
||||
"RelRIDAPIProviderOrgAPIEvents", // Has many via APIEvent
|
||||
"RelRIDOwner", // Has one via rid_owner
|
||||
"RelRIDAPIProviderOrgAPIEvents", // Has many via APIEvent
|
||||
"RelRIDOwner", // Has one via rid_owner
|
||||
}
|
||||
|
||||
for _, exp := range hasManyExpectations {
|
||||
@@ -561,8 +567,8 @@ func TestTypeMapper_SQLTypeToGoType_Bun(t *testing.T) {
|
||||
{"bigint", false, "resolvespec_common.SqlInt64"},
|
||||
{"varchar", true, "resolvespec_common.SqlString"}, // Bun uses sql types even for NOT NULL strings
|
||||
{"varchar", false, "resolvespec_common.SqlString"},
|
||||
{"timestamp", true, "resolvespec_common.SqlTime"},
|
||||
{"timestamp", false, "resolvespec_common.SqlTime"},
|
||||
{"timestamp", true, "resolvespec_common.SqlTimeStamp"},
|
||||
{"timestamp", false, "resolvespec_common.SqlTimeStamp"},
|
||||
{"date", false, "resolvespec_common.SqlDate"},
|
||||
{"boolean", true, "bool"},
|
||||
{"boolean", false, "resolvespec_common.SqlBool"},
|
||||
@@ -609,14 +615,75 @@ func TestTypeMapper_BuildBunTag(t *testing.T) {
|
||||
want: []string{"email,", "type:varchar(255),", "nullzero,"},
|
||||
},
|
||||
{
|
||||
name: "with default",
|
||||
name: "with default string",
|
||||
column: &models.Column{
|
||||
Name: "status",
|
||||
Type: "text",
|
||||
NotNull: true,
|
||||
Default: "active",
|
||||
},
|
||||
want: []string{"status,", "type:text,", "default:active,"},
|
||||
want: []string{"status,", "type:text,", "default:'active',"},
|
||||
},
|
||||
{
|
||||
name: "with default integer",
|
||||
column: &models.Column{
|
||||
Name: "retries",
|
||||
Type: "integer",
|
||||
NotNull: true,
|
||||
Default: "0",
|
||||
},
|
||||
want: []string{"retries,", "type:integer,", "default:0,"},
|
||||
},
|
||||
{
|
||||
name: "with default boolean",
|
||||
column: &models.Column{
|
||||
Name: "active",
|
||||
Type: "boolean",
|
||||
NotNull: true,
|
||||
Default: "true",
|
||||
},
|
||||
want: []string{"active,", "type:boolean,", "default:true,"},
|
||||
},
|
||||
{
|
||||
name: "with default function call",
|
||||
column: &models.Column{
|
||||
Name: "created_at",
|
||||
Type: "timestamp",
|
||||
NotNull: true,
|
||||
Default: "now()",
|
||||
},
|
||||
want: []string{"created_at,", "type:timestamp,", "default:now(),"},
|
||||
},
|
||||
{
|
||||
name: "auto increment with AutoIncrement flag",
|
||||
column: &models.Column{
|
||||
Name: "id",
|
||||
Type: "bigint",
|
||||
NotNull: true,
|
||||
IsPrimaryKey: true,
|
||||
AutoIncrement: true,
|
||||
},
|
||||
want: []string{"id,", "type:bigint,", "pk,", "autoincrement,"},
|
||||
},
|
||||
{
|
||||
name: "serial type (auto-increment)",
|
||||
column: &models.Column{
|
||||
Name: "id",
|
||||
Type: "serial",
|
||||
NotNull: true,
|
||||
IsPrimaryKey: true,
|
||||
},
|
||||
want: []string{"id,", "type:serial,", "pk,", "autoincrement,"},
|
||||
},
|
||||
{
|
||||
name: "bigserial type (auto-increment)",
|
||||
column: &models.Column{
|
||||
Name: "id",
|
||||
Type: "bigserial",
|
||||
NotNull: true,
|
||||
IsPrimaryKey: true,
|
||||
},
|
||||
want: []string{"id,", "type:bigserial,", "pk,", "autoincrement,"},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -631,3 +698,23 @@ func TestTypeMapper_BuildBunTag(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTypeMapper_BuildBunTag_PreservesExplicitTypeModifiers(t *testing.T) {
|
||||
mapper := NewTypeMapper()
|
||||
|
||||
col := &models.Column{
|
||||
Name: "embedding",
|
||||
Type: "vector(1536)",
|
||||
Length: 1536,
|
||||
Precision: 0,
|
||||
Scale: 0,
|
||||
}
|
||||
|
||||
tag := mapper.BuildBunTag(col, nil)
|
||||
if !strings.Contains(tag, "type:vector(1536),") {
|
||||
t.Fatalf("expected explicit modifier to be preserved, got %q", tag)
|
||||
}
|
||||
if strings.Contains(tag, ")(") {
|
||||
t.Fatalf("type modifier appears duplicated in %q", tag)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -62,10 +62,10 @@ func (w *Writer) databaseToDBML(d *models.Database) string {
|
||||
var sb strings.Builder
|
||||
|
||||
if d.Description != "" {
|
||||
sb.WriteString(fmt.Sprintf("// %s\n", d.Description))
|
||||
fmt.Fprintf(&sb, "// %s\n", d.Description)
|
||||
}
|
||||
if d.Comment != "" {
|
||||
sb.WriteString(fmt.Sprintf("// %s\n", d.Comment))
|
||||
fmt.Fprintf(&sb, "// %s\n", d.Comment)
|
||||
}
|
||||
if d.Description != "" || d.Comment != "" {
|
||||
sb.WriteString("\n")
|
||||
@@ -94,7 +94,7 @@ func (w *Writer) schemaToDBML(schema *models.Schema) string {
|
||||
var sb strings.Builder
|
||||
|
||||
if schema.Description != "" {
|
||||
sb.WriteString(fmt.Sprintf("// Schema: %s - %s\n", schema.Name, schema.Description))
|
||||
fmt.Fprintf(&sb, "// Schema: %s - %s\n", schema.Name, schema.Description)
|
||||
}
|
||||
|
||||
for _, table := range schema.Tables {
|
||||
@@ -110,10 +110,10 @@ func (w *Writer) tableToDBML(t *models.Table) string {
|
||||
var sb strings.Builder
|
||||
|
||||
tableName := fmt.Sprintf("%s.%s", t.Schema, t.Name)
|
||||
sb.WriteString(fmt.Sprintf("Table %s {\n", tableName))
|
||||
fmt.Fprintf(&sb, "Table %s {\n", tableName)
|
||||
|
||||
for _, column := range t.Columns {
|
||||
sb.WriteString(fmt.Sprintf(" %s %s", column.Name, column.Type))
|
||||
fmt.Fprintf(&sb, " %s %s", column.Name, column.Type)
|
||||
|
||||
var attrs []string
|
||||
if column.IsPrimaryKey {
|
||||
@@ -138,11 +138,11 @@ func (w *Writer) tableToDBML(t *models.Table) string {
|
||||
}
|
||||
|
||||
if len(attrs) > 0 {
|
||||
sb.WriteString(fmt.Sprintf(" [%s]", strings.Join(attrs, ", ")))
|
||||
fmt.Fprintf(&sb, " [%s]", strings.Join(attrs, ", "))
|
||||
}
|
||||
|
||||
if column.Comment != "" {
|
||||
sb.WriteString(fmt.Sprintf(" // %s", column.Comment))
|
||||
fmt.Fprintf(&sb, " // %s", column.Comment)
|
||||
}
|
||||
sb.WriteString("\n")
|
||||
}
|
||||
@@ -161,9 +161,9 @@ func (w *Writer) tableToDBML(t *models.Table) string {
|
||||
indexAttrs = append(indexAttrs, fmt.Sprintf("type: %s", index.Type))
|
||||
}
|
||||
|
||||
sb.WriteString(fmt.Sprintf(" (%s)", strings.Join(index.Columns, ", ")))
|
||||
fmt.Fprintf(&sb, " (%s)", strings.Join(index.Columns, ", "))
|
||||
if len(indexAttrs) > 0 {
|
||||
sb.WriteString(fmt.Sprintf(" [%s]", strings.Join(indexAttrs, ", ")))
|
||||
fmt.Fprintf(&sb, " [%s]", strings.Join(indexAttrs, ", "))
|
||||
}
|
||||
sb.WriteString("\n")
|
||||
}
|
||||
@@ -172,7 +172,7 @@ func (w *Writer) tableToDBML(t *models.Table) string {
|
||||
|
||||
note := strings.TrimSpace(t.Description + " " + t.Comment)
|
||||
if note != "" {
|
||||
sb.WriteString(fmt.Sprintf("\n Note: '%s'\n", note))
|
||||
fmt.Fprintf(&sb, "\n Note: '%s'\n", note)
|
||||
}
|
||||
|
||||
sb.WriteString("}\n")
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -155,8 +156,15 @@ func (w *Writer) mapTableFields(table *models.Table) models.DCTXTable {
|
||||
},
|
||||
}
|
||||
|
||||
columnNames := make([]string, 0, len(table.Columns))
|
||||
for name := range table.Columns {
|
||||
columnNames = append(columnNames, name)
|
||||
}
|
||||
sort.Strings(columnNames)
|
||||
|
||||
i := 0
|
||||
for _, column := range table.Columns {
|
||||
for _, colName := range columnNames {
|
||||
column := table.Columns[colName]
|
||||
dctxTable.Fields[i] = w.mapField(column)
|
||||
i++
|
||||
}
|
||||
@@ -165,12 +173,27 @@ func (w *Writer) mapTableFields(table *models.Table) models.DCTXTable {
|
||||
}
|
||||
|
||||
func (w *Writer) mapTableKeys(table *models.Table) []models.DCTXKey {
|
||||
keys := make([]models.DCTXKey, len(table.Indexes))
|
||||
i := 0
|
||||
indexes := make([]*models.Index, 0, len(table.Indexes))
|
||||
for _, index := range table.Indexes {
|
||||
keys[i] = w.mapKey(index, table)
|
||||
i++
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
|
||||
// Stable ordering for deterministic output and test reproducibility:
|
||||
// primary keys first, then lexicographic by index name.
|
||||
sort.Slice(indexes, func(i, j int) bool {
|
||||
iPrimary := strings.HasSuffix(indexes[i].Name, "_pkey")
|
||||
jPrimary := strings.HasSuffix(indexes[j].Name, "_pkey")
|
||||
if iPrimary != jPrimary {
|
||||
return iPrimary
|
||||
}
|
||||
return indexes[i].Name < indexes[j].Name
|
||||
})
|
||||
|
||||
keys := make([]models.DCTXKey, len(indexes))
|
||||
for i, index := range indexes {
|
||||
keys[i] = w.mapKey(index, table)
|
||||
}
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
|
||||
67
pkg/writers/doc.go
Normal file
67
pkg/writers/doc.go
Normal file
@@ -0,0 +1,67 @@
|
||||
// Package writers provides interfaces and implementations for writing database schemas
|
||||
// to various output formats and destinations.
|
||||
//
|
||||
// # Overview
|
||||
//
|
||||
// The writers package defines a common Writer interface that all format-specific writers
|
||||
// implement. This allows RelSpec to export database schemas to multiple formats including:
|
||||
// - SQL schema files (PostgreSQL, SQLite)
|
||||
// - Schema definition files (DBML, DCTX, DrawDB, GraphQL)
|
||||
// - ORM model files (GORM, Bun, Drizzle, Prisma, TypeORM)
|
||||
// - Data interchange formats (JSON, YAML)
|
||||
//
|
||||
// # Architecture
|
||||
//
|
||||
// Each writer implementation is located in its own subpackage (e.g., pkg/writers/dbml,
|
||||
// pkg/writers/pgsql) and implements the Writer interface, supporting three levels of
|
||||
// granularity:
|
||||
// - WriteDatabase() - Write complete database with all schemas
|
||||
// - WriteSchema() - Write single schema with all tables
|
||||
// - WriteTable() - Write single table with all columns and metadata
|
||||
//
|
||||
// # Usage
|
||||
//
|
||||
// Writers are instantiated with WriterOptions containing destination-specific configuration:
|
||||
//
|
||||
// // Write to file
|
||||
// writer := dbml.NewWriter(&writers.WriterOptions{
|
||||
// OutputPath: "schema.dbml",
|
||||
// })
|
||||
// err := writer.WriteDatabase(db)
|
||||
//
|
||||
// // Write ORM models with package name
|
||||
// writer := gorm.NewWriter(&writers.WriterOptions{
|
||||
// OutputPath: "./models",
|
||||
// PackageName: "models",
|
||||
// })
|
||||
// err := writer.WriteDatabase(db)
|
||||
//
|
||||
// // Write with schema flattening for SQLite
|
||||
// writer := sqlite.NewWriter(&writers.WriterOptions{
|
||||
// OutputPath: "schema.sql",
|
||||
// FlattenSchema: true,
|
||||
// })
|
||||
// err := writer.WriteDatabase(db)
|
||||
//
|
||||
// # Schema Flattening
|
||||
//
|
||||
// The FlattenSchema option controls how schema-qualified table names are handled:
|
||||
// - false (default): Uses dot notation (schema.table)
|
||||
// - true: Joins with underscore (schema_table), useful for SQLite
|
||||
//
|
||||
// # Supported Formats
|
||||
//
|
||||
// - dbml: Database Markup Language files
|
||||
// - dctx: DCTX schema files
|
||||
// - drawdb: DrawDB JSON format
|
||||
// - graphql: GraphQL schema definition language
|
||||
// - json: JSON database schema
|
||||
// - yaml: YAML database schema
|
||||
// - gorm: Go GORM model structs
|
||||
// - bun: Go Bun model structs
|
||||
// - drizzle: TypeScript Drizzle ORM schemas
|
||||
// - prisma: Prisma schema language
|
||||
// - typeorm: TypeScript TypeORM entities
|
||||
// - pgsql: PostgreSQL SQL schema
|
||||
// - sqlite: SQLite SQL schema with automatic flattening
|
||||
package writers
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
|
||||
)
|
||||
|
||||
// TypeMapper handles SQL to Drizzle type conversions
|
||||
@@ -18,7 +19,7 @@ func NewTypeMapper() *TypeMapper {
|
||||
// SQLTypeToDrizzle converts SQL types to Drizzle column type functions
|
||||
// Returns the Drizzle column constructor (e.g., "integer", "varchar", "text")
|
||||
func (tm *TypeMapper) SQLTypeToDrizzle(sqlType string) string {
|
||||
sqlTypeLower := strings.ToLower(sqlType)
|
||||
sqlTypeLower := pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(sqlType))
|
||||
|
||||
// PostgreSQL type mapping to Drizzle
|
||||
typeMap := map[string]string{
|
||||
@@ -87,13 +88,6 @@ func (tm *TypeMapper) SQLTypeToDrizzle(sqlType string) string {
|
||||
return drizzleType
|
||||
}
|
||||
|
||||
// Check for partial matches (e.g., "varchar(255)" -> "varchar")
|
||||
for sqlPattern, drizzleType := range typeMap {
|
||||
if strings.HasPrefix(sqlTypeLower, sqlPattern) {
|
||||
return drizzleType
|
||||
}
|
||||
}
|
||||
|
||||
// Default to text for unknown types
|
||||
return "text"
|
||||
}
|
||||
|
||||
@@ -109,8 +109,7 @@ func NewModelData(table *models.Table, schema string, typeMapper *TypeMapper, fl
|
||||
tableName := writers.QualifiedTableName(schema, table.Name, flattenSchema)
|
||||
|
||||
// Generate model name: Model + Schema + Table (all PascalCase)
|
||||
singularTable := Singularize(table.Name)
|
||||
tablePart := SnakeCaseToPascalCase(singularTable)
|
||||
tablePart := SnakeCaseToPascalCase(table.Name)
|
||||
|
||||
// Include schema name in model name
|
||||
var modelName string
|
||||
@@ -214,6 +213,21 @@ func resolveFieldNameCollision(fieldName string) string {
|
||||
return fieldName
|
||||
}
|
||||
|
||||
// sortConstraints sorts constraints by sequence, then by name
|
||||
func sortConstraints(constraints map[string]*models.Constraint) []*models.Constraint {
|
||||
result := make([]*models.Constraint, 0, len(constraints))
|
||||
for _, c := range constraints {
|
||||
result = append(result, c)
|
||||
}
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
if result[i].Sequence > 0 && result[j].Sequence > 0 {
|
||||
return result[i].Sequence < result[j].Sequence
|
||||
}
|
||||
return result[i].Name < result[j].Name
|
||||
})
|
||||
return result
|
||||
}
|
||||
|
||||
// sortColumns sorts columns by sequence, then by name
|
||||
func sortColumns(columns map[string]*models.Column) []*models.Column {
|
||||
result := make([]*models.Column, 0, len(columns))
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||
)
|
||||
|
||||
@@ -39,14 +40,7 @@ func (tm *TypeMapper) SQLTypeToGoType(sqlType string, notNull bool) string {
|
||||
// extractBaseType extracts the base type from a SQL type string
|
||||
// Examples: varchar(100) → varchar, numeric(10,2) → numeric
|
||||
func (tm *TypeMapper) extractBaseType(sqlType string) string {
|
||||
sqlType = strings.ToLower(strings.TrimSpace(sqlType))
|
||||
|
||||
// Remove everything after '('
|
||||
if idx := strings.Index(sqlType, "("); idx > 0 {
|
||||
sqlType = sqlType[:idx]
|
||||
}
|
||||
|
||||
return sqlType
|
||||
return pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(sqlType))
|
||||
}
|
||||
|
||||
// baseGoType returns the base Go type for a SQL type (not null)
|
||||
@@ -158,10 +152,10 @@ func (tm *TypeMapper) nullableGoType(sqlType string) string {
|
||||
"decimal": tm.sqlTypesAlias + ".SqlFloat64",
|
||||
|
||||
// Date/Time types
|
||||
"timestamp": tm.sqlTypesAlias + ".SqlTime",
|
||||
"timestamp without time zone": tm.sqlTypesAlias + ".SqlTime",
|
||||
"timestamp with time zone": tm.sqlTypesAlias + ".SqlTime",
|
||||
"timestamptz": tm.sqlTypesAlias + ".SqlTime",
|
||||
"timestamp": tm.sqlTypesAlias + ".SqlTimeStamp",
|
||||
"timestamp without time zone": tm.sqlTypesAlias + ".SqlTimeStamp",
|
||||
"timestamp with time zone": tm.sqlTypesAlias + ".SqlTimeStamp",
|
||||
"timestamptz": tm.sqlTypesAlias + ".SqlTimeStamp",
|
||||
"date": tm.sqlTypesAlias + ".SqlDate",
|
||||
"time": tm.sqlTypesAlias + ".SqlTime",
|
||||
"time without time zone": tm.sqlTypesAlias + ".SqlTime",
|
||||
@@ -209,9 +203,10 @@ func (tm *TypeMapper) BuildGormTag(column *models.Column, table *models.Table) s
|
||||
// Include length, precision, scale if present
|
||||
// Sanitize type to remove backticks
|
||||
typeStr := writers.SanitizeStructTagValue(column.Type)
|
||||
if column.Length > 0 {
|
||||
hasExplicitTypeModifier := pgsql.HasExplicitTypeModifier(typeStr)
|
||||
if !hasExplicitTypeModifier && column.Length > 0 {
|
||||
typeStr = fmt.Sprintf("%s(%d)", typeStr, column.Length)
|
||||
} else if column.Precision > 0 {
|
||||
} else if !hasExplicitTypeModifier && column.Precision > 0 {
|
||||
if column.Scale > 0 {
|
||||
typeStr = fmt.Sprintf("%s(%d,%d)", typeStr, column.Precision, column.Scale)
|
||||
} else {
|
||||
@@ -238,8 +233,8 @@ func (tm *TypeMapper) BuildGormTag(column *models.Column, table *models.Table) s
|
||||
|
||||
// Default value
|
||||
if column.Default != nil {
|
||||
// Sanitize default value to remove backticks
|
||||
safeDefault := writers.SanitizeStructTagValue(fmt.Sprintf("%v", column.Default))
|
||||
// Sanitize default value to remove backticks, then quote based on column type
|
||||
safeDefault := writers.QuoteDefaultValue(writers.SanitizeStructTagValue(fmt.Sprintf("%v", column.Default)), column.Type)
|
||||
parts = append(parts, fmt.Sprintf("default:%s", safeDefault))
|
||||
}
|
||||
|
||||
|
||||
@@ -236,7 +236,7 @@ func (w *Writer) addRelationshipFields(modelData *ModelData, table *models.Table
|
||||
usedFieldNames := make(map[string]int)
|
||||
|
||||
// For each foreign key in this table, add a belongs-to relationship
|
||||
for _, constraint := range table.Constraints {
|
||||
for _, constraint := range sortConstraints(table.Constraints) {
|
||||
if constraint.Type != models.ForeignKeyConstraint {
|
||||
continue
|
||||
}
|
||||
@@ -269,7 +269,7 @@ func (w *Writer) addRelationshipFields(modelData *ModelData, table *models.Table
|
||||
continue // Skip self
|
||||
}
|
||||
|
||||
for _, constraint := range otherTable.Constraints {
|
||||
for _, constraint := range sortConstraints(otherTable.Constraints) {
|
||||
if constraint.Type != models.ForeignKeyConstraint {
|
||||
continue
|
||||
}
|
||||
@@ -312,8 +312,7 @@ func (w *Writer) findTable(schemaName, tableName string, db *models.Database) *m
|
||||
|
||||
// getModelName generates the model name from schema and table name
|
||||
func (w *Writer) getModelName(schemaName, tableName string) string {
|
||||
singular := Singularize(tableName)
|
||||
tablePart := SnakeCaseToPascalCase(singular)
|
||||
tablePart := SnakeCaseToPascalCase(tableName)
|
||||
|
||||
// Include schema name in model name
|
||||
var modelName string
|
||||
|
||||
@@ -14,12 +14,12 @@ func TestWriter_WriteTable(t *testing.T) {
|
||||
// Create a simple table
|
||||
table := models.InitTable("users", "public")
|
||||
table.Columns["id"] = &models.Column{
|
||||
Name: "id",
|
||||
Type: "bigint",
|
||||
NotNull: true,
|
||||
IsPrimaryKey: true,
|
||||
Name: "id",
|
||||
Type: "bigint",
|
||||
NotNull: true,
|
||||
IsPrimaryKey: true,
|
||||
AutoIncrement: true,
|
||||
Sequence: 1,
|
||||
Sequence: 1,
|
||||
}
|
||||
table.Columns["email"] = &models.Column{
|
||||
Name: "email",
|
||||
@@ -66,7 +66,7 @@ func TestWriter_WriteTable(t *testing.T) {
|
||||
// Verify key elements are present
|
||||
expectations := []string{
|
||||
"package models",
|
||||
"type ModelPublicUser struct",
|
||||
"type ModelPublicUsers struct",
|
||||
"ID",
|
||||
"int64",
|
||||
"Email",
|
||||
@@ -75,9 +75,9 @@ func TestWriter_WriteTable(t *testing.T) {
|
||||
"time.Time",
|
||||
"gorm:\"column:id",
|
||||
"gorm:\"column:email",
|
||||
"func (m ModelPublicUser) TableName() string",
|
||||
"func (m ModelPublicUsers) TableName() string",
|
||||
"return \"public.users\"",
|
||||
"func (m ModelPublicUser) GetID() int64",
|
||||
"func (m ModelPublicUsers) GetID() int64",
|
||||
}
|
||||
|
||||
for _, expected := range expectations {
|
||||
@@ -444,10 +444,10 @@ func TestWriter_MultipleHasManyRelationships(t *testing.T) {
|
||||
|
||||
// Verify all has-many relationships have unique names
|
||||
hasManyExpectations := []string{
|
||||
"RelRIDAPIProviderOrgLogins", // Has many via Login
|
||||
"RelRIDAPIProviderOrgLogins", // Has many via Login
|
||||
"RelRIDAPIProviderOrgFilepointers", // Has many via Filepointer
|
||||
"RelRIDAPIProviderOrgAPIEvents", // Has many via APIEvent
|
||||
"RelRIDOwner", // Belongs to via rid_owner
|
||||
"RelRIDAPIProviderOrgAPIEvents", // Has many via APIEvent
|
||||
"RelRIDOwner", // Belongs to via rid_owner
|
||||
}
|
||||
|
||||
for _, exp := range hasManyExpectations {
|
||||
@@ -655,7 +655,7 @@ func TestTypeMapper_SQLTypeToGoType(t *testing.T) {
|
||||
{"varchar", true, "string"},
|
||||
{"varchar", false, "sql_types.SqlString"},
|
||||
{"timestamp", true, "time.Time"},
|
||||
{"timestamp", false, "sql_types.SqlTime"},
|
||||
{"timestamp", false, "sql_types.SqlTimeStamp"},
|
||||
{"boolean", true, "bool"},
|
||||
{"boolean", false, "sql_types.SqlBool"},
|
||||
}
|
||||
@@ -669,3 +669,23 @@ func TestTypeMapper_SQLTypeToGoType(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTypeMapper_BuildGormTag_PreservesExplicitTypeModifiers(t *testing.T) {
|
||||
mapper := NewTypeMapper()
|
||||
|
||||
col := &models.Column{
|
||||
Name: "embedding",
|
||||
Type: "vector(1536)",
|
||||
Length: 1536,
|
||||
Precision: 0,
|
||||
Scale: 0,
|
||||
}
|
||||
|
||||
tag := mapper.BuildGormTag(col, nil)
|
||||
if !strings.Contains(tag, "type:vector(1536)") {
|
||||
t.Fatalf("expected explicit modifier to be preserved, got %q", tag)
|
||||
}
|
||||
if strings.Contains(tag, ")(") {
|
||||
t.Fatalf("type modifier appears duplicated in %q", tag)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
|
||||
)
|
||||
|
||||
func (w *Writer) sqlTypeToGraphQL(sqlType string, column *models.Column, table *models.Table, schema *models.Schema) string {
|
||||
@@ -33,12 +34,11 @@ func (w *Writer) sqlTypeToGraphQL(sqlType string, column *models.Column, table *
|
||||
}
|
||||
|
||||
// Standard type mappings
|
||||
baseType := strings.Split(sqlType, "(")[0] // Remove length/precision
|
||||
baseType = strings.TrimSpace(baseType)
|
||||
baseType := pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(sqlType))
|
||||
|
||||
// Handle array types
|
||||
if strings.HasSuffix(baseType, "[]") {
|
||||
elemType := strings.TrimSuffix(baseType, "[]")
|
||||
if pgsql.IsArrayType(sqlType) {
|
||||
elemType := pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(pgsql.ElementType(sqlType)))
|
||||
gqlType := w.mapBaseTypeToGraphQL(elemType)
|
||||
return "[" + gqlType + "]"
|
||||
}
|
||||
@@ -108,8 +108,7 @@ func (w *Writer) sqlTypeToCustomScalar(sqlType string) string {
|
||||
"date": "Date",
|
||||
}
|
||||
|
||||
baseType := strings.Split(sqlType, "(")[0]
|
||||
baseType = strings.TrimSpace(baseType)
|
||||
baseType := pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(sqlType))
|
||||
|
||||
if scalar, ok := scalarMap[baseType]; ok {
|
||||
return scalar
|
||||
@@ -132,8 +131,7 @@ func (w *Writer) isIntegerType(sqlType string) bool {
|
||||
"smallserial": true,
|
||||
}
|
||||
|
||||
baseType := strings.Split(sqlType, "(")[0]
|
||||
baseType = strings.TrimSpace(baseType)
|
||||
baseType := pgsql.CanonicalizeBaseType(pgsql.ExtractBaseTypeLower(sqlType))
|
||||
|
||||
return intTypes[baseType]
|
||||
}
|
||||
|
||||
@@ -52,7 +52,7 @@ func (w *Writer) databaseToGraphQL(db *models.Database) string {
|
||||
if w.shouldIncludeComments() {
|
||||
sb.WriteString("# Generated GraphQL Schema\n")
|
||||
if db.Name != "" {
|
||||
sb.WriteString(fmt.Sprintf("# Database: %s\n", db.Name))
|
||||
fmt.Fprintf(&sb, "# Database: %s\n", db.Name)
|
||||
}
|
||||
sb.WriteString("\n")
|
||||
}
|
||||
@@ -62,7 +62,7 @@ func (w *Writer) databaseToGraphQL(db *models.Database) string {
|
||||
scalars := w.collectCustomScalars(db)
|
||||
if len(scalars) > 0 {
|
||||
for _, scalar := range scalars {
|
||||
sb.WriteString(fmt.Sprintf("scalar %s\n", scalar))
|
||||
fmt.Fprintf(&sb, "scalar %s\n", scalar)
|
||||
}
|
||||
sb.WriteString("\n")
|
||||
}
|
||||
@@ -176,9 +176,9 @@ func (w *Writer) isJoinTable(table *models.Table) bool {
|
||||
func (w *Writer) enumToGraphQL(enum *models.Enum) string {
|
||||
var sb strings.Builder
|
||||
|
||||
sb.WriteString(fmt.Sprintf("enum %s {\n", enum.Name))
|
||||
fmt.Fprintf(&sb, "enum %s {\n", enum.Name)
|
||||
for _, value := range enum.Values {
|
||||
sb.WriteString(fmt.Sprintf(" %s\n", value))
|
||||
fmt.Fprintf(&sb, " %s\n", value)
|
||||
}
|
||||
sb.WriteString("}\n")
|
||||
|
||||
@@ -197,10 +197,10 @@ func (w *Writer) tableToGraphQL(table *models.Table, db *models.Database, schema
|
||||
if desc == "" {
|
||||
desc = table.Comment
|
||||
}
|
||||
sb.WriteString(fmt.Sprintf("# %s\n", desc))
|
||||
fmt.Fprintf(&sb, "# %s\n", desc)
|
||||
}
|
||||
|
||||
sb.WriteString(fmt.Sprintf("type %s {\n", typeName))
|
||||
fmt.Fprintf(&sb, "type %s {\n", typeName)
|
||||
|
||||
// Collect and categorize fields
|
||||
var idFields, scalarFields, relationFields []string
|
||||
|
||||
130
pkg/writers/mssql/README.md
Normal file
130
pkg/writers/mssql/README.md
Normal file
@@ -0,0 +1,130 @@
|
||||
# MSSQL Writer
|
||||
|
||||
Generates Microsoft SQL Server DDL (Data Definition Language) from database schema models.
|
||||
|
||||
## Features
|
||||
|
||||
- **DDL Generation**: Generates complete SQL scripts for creating MSSQL schema
|
||||
- **Schema Support**: Creates multiple schemas with proper naming
|
||||
- **Bracket Notation**: Uses [schema].[table] bracket notation for identifiers
|
||||
- **Identity Columns**: Generates IDENTITY(1,1) for auto-increment columns
|
||||
- **Constraints**: Generates primary keys, foreign keys, unique, and check constraints
|
||||
- **Indexes**: Creates indexes with unique support
|
||||
- **Extended Properties**: Uses sp_addextendedproperty for comments
|
||||
- **Direct Execution**: Can directly execute DDL on MSSQL database
|
||||
- **Schema Flattening**: Optional schema flattening for compatibility
|
||||
|
||||
## Features by Phase
|
||||
|
||||
1. **Phase 1**: Create schemas
|
||||
2. **Phase 2**: Create tables with columns, identity, and defaults
|
||||
3. **Phase 3**: Add primary key constraints
|
||||
4. **Phase 4**: Create indexes
|
||||
5. **Phase 5**: Add unique constraints
|
||||
6. **Phase 6**: Add check constraints
|
||||
7. **Phase 7**: Add foreign key constraints
|
||||
8. **Phase 8**: Add extended properties (comments)
|
||||
|
||||
## Type Mappings
|
||||
|
||||
| Canonical Type | MSSQL Type |
|
||||
|----------------|-----------|
|
||||
| int | INT |
|
||||
| int64 | BIGINT |
|
||||
| int16 | SMALLINT |
|
||||
| int8 | TINYINT |
|
||||
| bool | BIT |
|
||||
| float32 | REAL |
|
||||
| float64 | FLOAT |
|
||||
| decimal | NUMERIC |
|
||||
| string | NVARCHAR(255) |
|
||||
| text | NVARCHAR(MAX) |
|
||||
| timestamp | DATETIME2 |
|
||||
| timestamptz | DATETIMEOFFSET |
|
||||
| uuid | UNIQUEIDENTIFIER |
|
||||
| bytea | VARBINARY(MAX) |
|
||||
| date | DATE |
|
||||
| time | TIME |
|
||||
|
||||
## Usage
|
||||
|
||||
### Generate SQL File
|
||||
|
||||
```go
|
||||
import "git.warky.dev/wdevs/relspecgo/pkg/writers/mssql"
|
||||
import "git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||
|
||||
writer := mssql.NewWriter(&writers.WriterOptions{
|
||||
OutputPath: "schema.sql",
|
||||
FlattenSchema: false,
|
||||
})
|
||||
|
||||
err := writer.WriteDatabase(db)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
```
|
||||
|
||||
### Direct Database Execution
|
||||
|
||||
```go
|
||||
writer := mssql.NewWriter(&writers.WriterOptions{
|
||||
OutputPath: "",
|
||||
Metadata: map[string]interface{}{
|
||||
"connection_string": "sqlserver://sa:password@localhost/newdb",
|
||||
},
|
||||
})
|
||||
|
||||
err := writer.WriteDatabase(db)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
```
|
||||
|
||||
### CLI Usage
|
||||
|
||||
Generate SQL file:
|
||||
```bash
|
||||
relspec convert --from json --from-path schema.json \
|
||||
--to mssql --to-path schema.sql
|
||||
```
|
||||
|
||||
Execute directly to database:
|
||||
```bash
|
||||
relspec convert --from json --from-path schema.json \
|
||||
--to mssql \
|
||||
--metadata '{"connection_string":"sqlserver://sa:password@localhost/mydb"}'
|
||||
```
|
||||
|
||||
## Default Values
|
||||
|
||||
The writer supports several default value patterns:
|
||||
- Functions: `GETDATE()`, `CURRENT_TIMESTAMP()`
|
||||
- Literals: strings wrapped in quotes, numbers, booleans (0/1 for BIT)
|
||||
- CAST expressions
|
||||
|
||||
## Comments/Extended Properties
|
||||
|
||||
Table and column descriptions are stored as MS_Description extended properties:
|
||||
|
||||
```sql
|
||||
EXEC sp_addextendedproperty
|
||||
@name = 'MS_Description',
|
||||
@value = 'Table description here',
|
||||
@level0type = 'SCHEMA', @level0name = 'dbo',
|
||||
@level1type = 'TABLE', @level1name = 'my_table';
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
Run tests with:
|
||||
```bash
|
||||
go test ./pkg/writers/mssql/...
|
||||
```
|
||||
|
||||
## Limitations
|
||||
|
||||
- Views are not currently supported in the writer
|
||||
- Sequences are not supported (MSSQL uses IDENTITY instead)
|
||||
- Partitioning and advanced features are not supported
|
||||
- Generated DDL assumes no triggers or computed columns
|
||||
579
pkg/writers/mssql/writer.go
Normal file
579
pkg/writers/mssql/writer.go
Normal file
@@ -0,0 +1,579 @@
|
||||
package mssql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
_ "github.com/microsoft/go-mssqldb" // MSSQL driver
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/mssql"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||
)
|
||||
|
||||
// Writer implements the Writer interface for MSSQL SQL output
|
||||
type Writer struct {
|
||||
options *writers.WriterOptions
|
||||
writer io.Writer
|
||||
}
|
||||
|
||||
// NewWriter creates a new MSSQL SQL writer
|
||||
func NewWriter(options *writers.WriterOptions) *Writer {
|
||||
return &Writer{
|
||||
options: options,
|
||||
}
|
||||
}
|
||||
|
||||
// qualTable returns a schema-qualified name using bracket notation
|
||||
func (w *Writer) qualTable(schema, name string) string {
|
||||
if w.options.FlattenSchema {
|
||||
return fmt.Sprintf("[%s]", name)
|
||||
}
|
||||
return fmt.Sprintf("[%s].[%s]", schema, name)
|
||||
}
|
||||
|
||||
// WriteDatabase writes the entire database schema as SQL
|
||||
func (w *Writer) WriteDatabase(db *models.Database) error {
|
||||
// Check if we should execute SQL directly on a database
|
||||
if connString, ok := w.options.Metadata["connection_string"].(string); ok && connString != "" {
|
||||
return w.executeDatabaseSQL(db, connString)
|
||||
}
|
||||
|
||||
var writer io.Writer
|
||||
var file *os.File
|
||||
var err error
|
||||
|
||||
// Use existing writer if already set (for testing)
|
||||
if w.writer != nil {
|
||||
writer = w.writer
|
||||
} else if w.options.OutputPath != "" {
|
||||
// Determine output destination
|
||||
file, err = os.Create(w.options.OutputPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create output file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
writer = file
|
||||
} else {
|
||||
writer = os.Stdout
|
||||
}
|
||||
|
||||
w.writer = writer
|
||||
|
||||
// Write header comment
|
||||
fmt.Fprintf(w.writer, "-- MSSQL Database Schema\n")
|
||||
fmt.Fprintf(w.writer, "-- Database: %s\n", db.Name)
|
||||
fmt.Fprintf(w.writer, "-- Generated by RelSpec\n\n")
|
||||
|
||||
// Process each schema in the database
|
||||
for _, schema := range db.Schemas {
|
||||
if err := w.WriteSchema(schema); err != nil {
|
||||
return fmt.Errorf("failed to write schema %s: %w", schema.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteSchema writes a single schema and all its tables
|
||||
func (w *Writer) WriteSchema(schema *models.Schema) error {
|
||||
if w.writer == nil {
|
||||
w.writer = os.Stdout
|
||||
}
|
||||
|
||||
// Phase 1: Create schema (skip dbo schema and when flattening)
|
||||
if schema.Name != "dbo" && !w.options.FlattenSchema {
|
||||
fmt.Fprintf(w.writer, "-- Schema: %s\n", schema.Name)
|
||||
fmt.Fprintf(w.writer, "CREATE SCHEMA [%s];\n\n", schema.Name)
|
||||
}
|
||||
|
||||
// Phase 2: Create tables with columns
|
||||
fmt.Fprintf(w.writer, "-- Tables for schema: %s\n", schema.Name)
|
||||
for _, table := range schema.Tables {
|
||||
if err := w.writeCreateTable(schema, table); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 3: Primary keys
|
||||
fmt.Fprintf(w.writer, "-- Primary keys for schema: %s\n", schema.Name)
|
||||
for _, table := range schema.Tables {
|
||||
if err := w.writePrimaryKey(schema, table); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 4: Indexes
|
||||
fmt.Fprintf(w.writer, "-- Indexes for schema: %s\n", schema.Name)
|
||||
for _, table := range schema.Tables {
|
||||
if err := w.writeIndexes(schema, table); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 5: Unique constraints
|
||||
fmt.Fprintf(w.writer, "-- Unique constraints for schema: %s\n", schema.Name)
|
||||
for _, table := range schema.Tables {
|
||||
if err := w.writeUniqueConstraints(schema, table); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 6: Check constraints
|
||||
fmt.Fprintf(w.writer, "-- Check constraints for schema: %s\n", schema.Name)
|
||||
for _, table := range schema.Tables {
|
||||
if err := w.writeCheckConstraints(schema, table); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 7: Foreign keys
|
||||
fmt.Fprintf(w.writer, "-- Foreign keys for schema: %s\n", schema.Name)
|
||||
for _, table := range schema.Tables {
|
||||
if err := w.writeForeignKeys(schema, table); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 8: Comments
|
||||
fmt.Fprintf(w.writer, "-- Comments for schema: %s\n", schema.Name)
|
||||
for _, table := range schema.Tables {
|
||||
if err := w.writeComments(schema, table); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteTable writes a single table with all its elements
|
||||
func (w *Writer) WriteTable(table *models.Table) error {
|
||||
if w.writer == nil {
|
||||
w.writer = os.Stdout
|
||||
}
|
||||
|
||||
// Create a temporary schema with just this table
|
||||
schema := models.InitSchema(table.Schema)
|
||||
schema.Tables = append(schema.Tables, table)
|
||||
|
||||
return w.WriteSchema(schema)
|
||||
}
|
||||
|
||||
// writeCreateTable generates CREATE TABLE statement
|
||||
func (w *Writer) writeCreateTable(schema *models.Schema, table *models.Table) error {
|
||||
fmt.Fprintf(w.writer, "CREATE TABLE %s (\n", w.qualTable(schema.Name, table.Name))
|
||||
|
||||
// Sort columns by sequence
|
||||
columns := getSortedColumns(table.Columns)
|
||||
columnDefs := make([]string, 0, len(columns))
|
||||
|
||||
for _, col := range columns {
|
||||
def := w.generateColumnDefinition(col)
|
||||
columnDefs = append(columnDefs, " "+def)
|
||||
}
|
||||
|
||||
fmt.Fprintf(w.writer, "%s\n", strings.Join(columnDefs, ",\n"))
|
||||
fmt.Fprintf(w.writer, ");\n\n")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// generateColumnDefinition generates MSSQL column definition
|
||||
func (w *Writer) generateColumnDefinition(col *models.Column) string {
|
||||
parts := []string{fmt.Sprintf("[%s]", col.Name)}
|
||||
|
||||
// Type with length/precision
|
||||
baseType := mssql.ConvertCanonicalToMSSQL(col.Type)
|
||||
typeStr := baseType
|
||||
|
||||
// Handle specific type parameters for MSSQL
|
||||
if col.Length > 0 && col.Precision == 0 {
|
||||
// String types with length - override the default length from baseType
|
||||
if strings.HasPrefix(baseType, "NVARCHAR") || strings.HasPrefix(baseType, "VARCHAR") ||
|
||||
strings.HasPrefix(baseType, "CHAR") || strings.HasPrefix(baseType, "NCHAR") {
|
||||
if col.Length > 0 && col.Length < 8000 {
|
||||
// Extract base type without length specification
|
||||
baseName := strings.Split(baseType, "(")[0]
|
||||
typeStr = fmt.Sprintf("%s(%d)", baseName, col.Length)
|
||||
}
|
||||
}
|
||||
} else if col.Precision > 0 {
|
||||
// Numeric types with precision/scale
|
||||
baseName := strings.Split(baseType, "(")[0]
|
||||
if col.Scale > 0 {
|
||||
typeStr = fmt.Sprintf("%s(%d,%d)", baseName, col.Precision, col.Scale)
|
||||
} else {
|
||||
typeStr = fmt.Sprintf("%s(%d)", baseName, col.Precision)
|
||||
}
|
||||
}
|
||||
|
||||
parts = append(parts, typeStr)
|
||||
|
||||
// IDENTITY for auto-increment
|
||||
if col.AutoIncrement {
|
||||
parts = append(parts, "IDENTITY(1,1)")
|
||||
}
|
||||
|
||||
// NOT NULL
|
||||
if col.NotNull {
|
||||
parts = append(parts, "NOT NULL")
|
||||
}
|
||||
|
||||
// DEFAULT
|
||||
if col.Default != nil {
|
||||
switch v := col.Default.(type) {
|
||||
case string:
|
||||
cleanDefault := stripBackticks(v)
|
||||
if strings.HasPrefix(strings.ToUpper(cleanDefault), "GETDATE") ||
|
||||
strings.HasPrefix(strings.ToUpper(cleanDefault), "CURRENT_") {
|
||||
parts = append(parts, fmt.Sprintf("DEFAULT %s", cleanDefault))
|
||||
} else if cleanDefault == "true" || cleanDefault == "false" {
|
||||
if cleanDefault == "true" {
|
||||
parts = append(parts, "DEFAULT 1")
|
||||
} else {
|
||||
parts = append(parts, "DEFAULT 0")
|
||||
}
|
||||
} else {
|
||||
parts = append(parts, fmt.Sprintf("DEFAULT '%s'", escapeQuote(cleanDefault)))
|
||||
}
|
||||
case bool:
|
||||
if v {
|
||||
parts = append(parts, "DEFAULT 1")
|
||||
} else {
|
||||
parts = append(parts, "DEFAULT 0")
|
||||
}
|
||||
case int, int64:
|
||||
parts = append(parts, fmt.Sprintf("DEFAULT %v", v))
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(parts, " ")
|
||||
}
|
||||
|
||||
// writePrimaryKey generates ALTER TABLE statement for primary key
|
||||
func (w *Writer) writePrimaryKey(schema *models.Schema, table *models.Table) error {
|
||||
// Find primary key constraint
|
||||
var pkConstraint *models.Constraint
|
||||
for _, constraint := range table.Constraints {
|
||||
if constraint.Type == models.PrimaryKeyConstraint {
|
||||
pkConstraint = constraint
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
var columnNames []string
|
||||
pkName := fmt.Sprintf("PK_%s_%s", schema.Name, table.Name)
|
||||
|
||||
if pkConstraint != nil {
|
||||
pkName = pkConstraint.Name
|
||||
columnNames = make([]string, 0, len(pkConstraint.Columns))
|
||||
for _, colName := range pkConstraint.Columns {
|
||||
columnNames = append(columnNames, fmt.Sprintf("[%s]", colName))
|
||||
}
|
||||
} else {
|
||||
// Check for columns with IsPrimaryKey = true
|
||||
for _, col := range table.Columns {
|
||||
if col.IsPrimaryKey {
|
||||
columnNames = append(columnNames, fmt.Sprintf("[%s]", col.Name))
|
||||
}
|
||||
}
|
||||
sort.Strings(columnNames)
|
||||
}
|
||||
|
||||
if len(columnNames) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Fprintf(w.writer, "ALTER TABLE %s ADD CONSTRAINT [%s] PRIMARY KEY (%s);\n\n",
|
||||
w.qualTable(schema.Name, table.Name), pkName, strings.Join(columnNames, ", "))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeIndexes generates CREATE [UNIQUE] INDEX statements for all of the
// table's indexes, emitted in name order for deterministic output.
//
// Indexes whose name starts with "pk_" (case-insensitive) are skipped on the
// assumption that they back the primary key, which writePrimaryKey already
// emits. Indexes with no columns are skipped as well.
func (w *Writer) writeIndexes(schema *models.Schema, table *models.Table) error {
	// Sort indexes by name
	indexNames := make([]string, 0, len(table.Indexes))
	for name := range table.Indexes {
		indexNames = append(indexNames, name)
	}
	sort.Strings(indexNames)

	for _, name := range indexNames {
		index := table.Indexes[name]

		// Skip if it's a primary key index (name-prefix heuristic).
		if strings.HasPrefix(strings.ToLower(index.Name), "pk_") {
			continue
		}

		// Build column list, bracket-quoting each column name.
		columnExprs := make([]string, 0, len(index.Columns))
		for _, colName := range index.Columns {
			columnExprs = append(columnExprs, fmt.Sprintf("[%s]", colName))
		}

		if len(columnExprs) == 0 {
			continue
		}

		unique := ""
		if index.Unique {
			unique = "UNIQUE "
		}

		fmt.Fprintf(w.writer, "CREATE %sINDEX [%s] ON %s (%s);\n\n",
			unique, index.Name, w.qualTable(schema.Name, table.Name), strings.Join(columnExprs, ", "))
	}

	return nil
}
|
||||
|
||||
// writeUniqueConstraints generates ALTER TABLE ... ADD CONSTRAINT ... UNIQUE
// statements for every UniqueConstraint on the table, in constraint-name
// order for deterministic output. Constraints with no columns are skipped.
func (w *Writer) writeUniqueConstraints(schema *models.Schema, table *models.Table) error {
	// Sort constraints by name
	constraintNames := make([]string, 0)
	for name, constraint := range table.Constraints {
		if constraint.Type == models.UniqueConstraint {
			constraintNames = append(constraintNames, name)
		}
	}
	sort.Strings(constraintNames)

	for _, name := range constraintNames {
		constraint := table.Constraints[name]

		// Build column list, bracket-quoting each column name.
		columnExprs := make([]string, 0, len(constraint.Columns))
		for _, colName := range constraint.Columns {
			columnExprs = append(columnExprs, fmt.Sprintf("[%s]", colName))
		}

		if len(columnExprs) == 0 {
			continue
		}

		fmt.Fprintf(w.writer, "ALTER TABLE %s ADD CONSTRAINT [%s] UNIQUE (%s);\n\n",
			w.qualTable(schema.Name, table.Name), constraint.Name, strings.Join(columnExprs, ", "))
	}

	return nil
}
|
||||
|
||||
// writeCheckConstraints generates ALTER TABLE ... ADD CONSTRAINT ... CHECK
// statements for every CheckConstraint on the table, in constraint-name order
// for deterministic output. Constraints with an empty expression are skipped.
//
// NOTE(review): constraint.Expression is emitted verbatim, unescaped — it is
// assumed to already be valid T-SQL; confirm upstream readers guarantee this.
func (w *Writer) writeCheckConstraints(schema *models.Schema, table *models.Table) error {
	// Sort constraints by name
	constraintNames := make([]string, 0)
	for name, constraint := range table.Constraints {
		if constraint.Type == models.CheckConstraint {
			constraintNames = append(constraintNames, name)
		}
	}
	sort.Strings(constraintNames)

	for _, name := range constraintNames {
		constraint := table.Constraints[name]

		if constraint.Expression == "" {
			continue
		}

		fmt.Fprintf(w.writer, "ALTER TABLE %s ADD CONSTRAINT [%s] CHECK (%s);\n\n",
			w.qualTable(schema.Name, table.Name), constraint.Name, constraint.Expression)
	}

	return nil
}
|
||||
|
||||
// writeForeignKeys generates ALTER TABLE ... ADD CONSTRAINT ... FOREIGN KEY
// statements for every ForeignKeyConstraint on the table, in constraint-name
// order for deterministic output.
//
// Defaults applied: an empty ReferencedSchema falls back to the table's own
// schema; empty OnDelete/OnUpdate actions default to "NO ACTION". Constraints
// missing either source or referenced columns are skipped.
func (w *Writer) writeForeignKeys(schema *models.Schema, table *models.Table) error {
	// Process foreign key constraints
	constraintNames := make([]string, 0)
	for name, constraint := range table.Constraints {
		if constraint.Type == models.ForeignKeyConstraint {
			constraintNames = append(constraintNames, name)
		}
	}
	sort.Strings(constraintNames)

	for _, name := range constraintNames {
		constraint := table.Constraints[name]

		// Build column lists, bracket-quoting each column name.
		sourceColumns := make([]string, 0, len(constraint.Columns))
		for _, colName := range constraint.Columns {
			sourceColumns = append(sourceColumns, fmt.Sprintf("[%s]", colName))
		}

		targetColumns := make([]string, 0, len(constraint.ReferencedColumns))
		for _, colName := range constraint.ReferencedColumns {
			targetColumns = append(targetColumns, fmt.Sprintf("[%s]", colName))
		}

		if len(sourceColumns) == 0 || len(targetColumns) == 0 {
			continue
		}

		// Same-schema references may omit ReferencedSchema.
		refSchema := constraint.ReferencedSchema
		if refSchema == "" {
			refSchema = schema.Name
		}

		onDelete := "NO ACTION"
		if constraint.OnDelete != "" {
			onDelete = strings.ToUpper(constraint.OnDelete)
		}

		onUpdate := "NO ACTION"
		if constraint.OnUpdate != "" {
			onUpdate = strings.ToUpper(constraint.OnUpdate)
		}

		fmt.Fprintf(w.writer, "ALTER TABLE %s ADD CONSTRAINT [%s] FOREIGN KEY (%s)\n",
			w.qualTable(schema.Name, table.Name), constraint.Name, strings.Join(sourceColumns, ", "))
		fmt.Fprintf(w.writer, "    REFERENCES %s (%s)\n",
			w.qualTable(refSchema, constraint.ReferencedTable), strings.Join(targetColumns, ", "))
		fmt.Fprintf(w.writer, "    ON DELETE %s ON UPDATE %s;\n\n",
			onDelete, onUpdate)
	}

	return nil
}
|
||||
|
||||
// writeComments generates EXEC sp_addextendedproperty statements that attach
// MS_Description extended properties for the table description and for each
// column description (columns emitted via getSortedColumns for stable order).
// Descriptions are single-quote-escaped via escapeQuote; empty descriptions
// produce no output.
func (w *Writer) writeComments(schema *models.Schema, table *models.Table) error {
	// Table comment
	if table.Description != "" {
		fmt.Fprintf(w.writer, "EXEC sp_addextendedproperty\n")
		fmt.Fprintf(w.writer, "    @name = 'MS_Description',\n")
		fmt.Fprintf(w.writer, "    @value = '%s',\n", escapeQuote(table.Description))
		fmt.Fprintf(w.writer, "    @level0type = 'SCHEMA', @level0name = '%s',\n", schema.Name)
		fmt.Fprintf(w.writer, "    @level1type = 'TABLE', @level1name = '%s';\n\n", table.Name)
	}

	// Column comments
	for _, col := range getSortedColumns(table.Columns) {
		if col.Description != "" {
			fmt.Fprintf(w.writer, "EXEC sp_addextendedproperty\n")
			fmt.Fprintf(w.writer, "    @name = 'MS_Description',\n")
			fmt.Fprintf(w.writer, "    @value = '%s',\n", escapeQuote(col.Description))
			fmt.Fprintf(w.writer, "    @level0type = 'SCHEMA', @level0name = '%s',\n", schema.Name)
			fmt.Fprintf(w.writer, "    @level1type = 'TABLE', @level1name = '%s',\n", table.Name)
			fmt.Fprintf(w.writer, "    @level2type = 'COLUMN', @level2name = '%s';\n\n", col.Name)
		}
	}

	return nil
}
|
||||
|
||||
// executeDatabaseSQL generates DDL statements for the database and executes
// them directly against an MSSQL server reachable via connString.
//
// Behavior notes:
//   - Only CREATE SCHEMA / CREATE TABLE statements are generated here (see
//     generateSchemaStatements); constraints and indexes are not executed.
//   - Comment-only and empty statements are skipped.
//   - A statement that fails to execute is logged to stderr as a warning and
//     skipped; execution continues (best-effort, not transactional).
//
// Returns an error only for statement generation, connection, or ping
// failures.
func (w *Writer) executeDatabaseSQL(db *models.Database, connString string) error {
	// Generate SQL statements
	statements := []string{}
	statements = append(statements, "-- MSSQL Database Schema")
	statements = append(statements, fmt.Sprintf("-- Database: %s", db.Name))
	statements = append(statements, "-- Generated by RelSpec")

	for _, schema := range db.Schemas {
		if err := w.generateSchemaStatements(schema, &statements); err != nil {
			return fmt.Errorf("failed to generate statements for schema %s: %w", schema.Name, err)
		}
	}

	// Connect to database
	dbConn, err := sql.Open("mssql", connString)
	if err != nil {
		return fmt.Errorf("failed to connect to database: %w", err)
	}
	defer dbConn.Close()

	// sql.Open is lazy; ping to verify the connection actually works.
	ctx := context.Background()
	if err = dbConn.PingContext(ctx); err != nil {
		return fmt.Errorf("failed to ping database: %w", err)
	}

	// Execute statements
	executedCount := 0
	for i, stmt := range statements {
		stmtTrimmed := strings.TrimSpace(stmt)

		// Skip comments and empty statements
		if strings.HasPrefix(stmtTrimmed, "--") || stmtTrimmed == "" {
			continue
		}

		fmt.Fprintf(os.Stderr, "Executing statement %d/%d...\n", i+1, len(statements))

		_, execErr := dbConn.ExecContext(ctx, stmt)
		if execErr != nil {
			// Best-effort: warn and continue rather than aborting the run.
			fmt.Fprintf(os.Stderr, "⚠ Warning: Statement failed: %v\n", execErr)
			continue
		}

		executedCount++
	}

	fmt.Fprintf(os.Stderr, "✓ Successfully executed %d statements\n", executedCount)
	return nil
}
|
||||
|
||||
// generateSchemaStatements appends SQL statements for one schema to
// *statements.
//
// Phase 1 emits CREATE SCHEMA (skipped for the default "dbo" schema and when
// schema flattening is enabled). Phase 2 emits CREATE TABLE statements with
// column definitions only; constraints and indexes are intentionally not
// generated here (see the note at the end). Always returns nil.
func (w *Writer) generateSchemaStatements(schema *models.Schema, statements *[]string) error {
	// Phase 1: Create schema
	if schema.Name != "dbo" && !w.options.FlattenSchema {
		*statements = append(*statements, fmt.Sprintf("-- Schema: %s", schema.Name))
		*statements = append(*statements, fmt.Sprintf("CREATE SCHEMA [%s];", schema.Name))
	}

	// Phase 2: Create tables
	*statements = append(*statements, fmt.Sprintf("-- Tables for schema: %s", schema.Name))
	for _, table := range schema.Tables {
		createTableSQL := fmt.Sprintf("CREATE TABLE %s (", w.qualTable(schema.Name, table.Name))
		columnDefs := make([]string, 0)

		// Deterministic column order.
		columns := getSortedColumns(table.Columns)
		for _, col := range columns {
			def := w.generateColumnDefinition(col)
			columnDefs = append(columnDefs, "    "+def)
		}

		createTableSQL += "\n" + strings.Join(columnDefs, ",\n") + "\n)"
		*statements = append(*statements, createTableSQL)
	}

	// Phase 3-7: Constraints and indexes will be added by WriteSchema logic
	// For now, just create tables
	return nil
}
|
||||
|
||||
// Helper functions
|
||||
|
||||
// getSortedColumns returns columns sorted by sequence
|
||||
func getSortedColumns(columns map[string]*models.Column) []*models.Column {
|
||||
names := make([]string, 0, len(columns))
|
||||
for name := range columns {
|
||||
names = append(names, name)
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
sorted := make([]*models.Column, 0, len(columns))
|
||||
for _, name := range names {
|
||||
sorted = append(sorted, columns[name])
|
||||
}
|
||||
return sorted
|
||||
}
|
||||
|
||||
// escapeQuote doubles every single-quote character in s so the result can be
// safely embedded inside a single-quoted SQL string literal.
func escapeQuote(s string) string {
	var b strings.Builder
	b.Grow(len(s))
	for _, r := range s {
		if r == '\'' {
			b.WriteString("''")
			continue
		}
		b.WriteRune(r)
	}
	return b.String()
}
|
||||
|
||||
// stripBackticks removes every backtick character from a SQL expression,
// leaving all other characters untouched.
func stripBackticks(s string) string {
	if !strings.Contains(s, "`") {
		// Fast path: nothing to strip, avoid any allocation.
		return s
	}
	return strings.Join(strings.Split(s, "`"), "")
}
|
||||
205
pkg/writers/mssql/writer_test.go
Normal file
205
pkg/writers/mssql/writer_test.go
Normal file
@@ -0,0 +1,205 @@
|
||||
package mssql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestGenerateColumnDefinition tests column definition generation.
// Table-driven: each case supplies one models.Column and the exact T-SQL
// column definition the writer is expected to produce, covering NOT NULL,
// length-parameterized NVARCHAR, DEFAULT expressions, and IDENTITY columns.
func TestGenerateColumnDefinition(t *testing.T) {
	writer := NewWriter(&writers.WriterOptions{})

	tests := []struct {
		name     string
		column   *models.Column
		expected string
	}{
		{
			name: "INT NOT NULL",
			column: &models.Column{
				Name:     "id",
				Type:     "int",
				NotNull:  true,
				Sequence: 1,
			},
			expected: "[id] INT NOT NULL",
		},
		{
			// "string" model type maps to NVARCHAR with the column's length.
			name: "VARCHAR with length",
			column: &models.Column{
				Name:     "name",
				Type:     "string",
				Length:   100,
				NotNull:  true,
				Sequence: 2,
			},
			expected: "[name] NVARCHAR(100) NOT NULL",
		},
		{
			// Function defaults (GETDATE()) must be emitted unquoted.
			name: "DATETIME2 with default",
			column: &models.Column{
				Name:     "created_at",
				Type:     "timestamp",
				NotNull:  true,
				Default:  "GETDATE()",
				Sequence: 3,
			},
			expected: "[created_at] DATETIME2 NOT NULL DEFAULT GETDATE()",
		},
		{
			// AutoIncrement maps to MSSQL IDENTITY(1,1).
			name: "IDENTITY column",
			column: &models.Column{
				Name:          "id",
				Type:          "int",
				AutoIncrement: true,
				NotNull:       true,
				Sequence:      1,
			},
			expected: "[id] INT IDENTITY(1,1) NOT NULL",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := writer.generateColumnDefinition(tt.column)
			assert.Equal(t, tt.expected, result)
		})
	}
}
|
||||
|
||||
// TestWriteCreateTable tests CREATE TABLE statement generation: builds a
// two-column "dbo.users" table (IDENTITY id plus NVARCHAR email), writes it
// to an in-memory buffer, and checks the output contains the qualified table
// name and both column definitions.
func TestWriteCreateTable(t *testing.T) {
	writer := NewWriter(&writers.WriterOptions{})

	// Create a test schema with a table
	schema := models.InitSchema("dbo")
	table := models.InitTable("users", "dbo")

	col1 := models.InitColumn("id", "users", "dbo")
	col1.Type = "int"
	col1.AutoIncrement = true
	col1.NotNull = true
	col1.Sequence = 1

	col2 := models.InitColumn("email", "users", "dbo")
	col2.Type = "string"
	col2.Length = 255
	col2.NotNull = true
	col2.Sequence = 2

	table.Columns["id"] = col1
	table.Columns["email"] = col2

	// Write to buffer (replaces the writer's output destination).
	buf := &bytes.Buffer{}
	writer.writer = buf

	err := writer.writeCreateTable(schema, table)
	assert.NoError(t, err)

	// Substring checks keep the test independent of column ordering/layout.
	output := buf.String()
	assert.Contains(t, output, "CREATE TABLE [dbo].[users]")
	assert.Contains(t, output, "[id] INT IDENTITY(1,1) NOT NULL")
	assert.Contains(t, output, "[email] NVARCHAR(255) NOT NULL")
}
|
||||
|
||||
// TestWritePrimaryKey tests PRIMARY KEY constraint generation via the
// explicit-constraint path: a PrimaryKeyConstraint on "id" should yield an
// ALTER TABLE ... PRIMARY KEY statement for dbo.users.
func TestWritePrimaryKey(t *testing.T) {
	writer := NewWriter(&writers.WriterOptions{})

	schema := models.InitSchema("dbo")
	table := models.InitTable("users", "dbo")

	// Add primary key constraint
	pk := models.InitConstraint("PK_users_id", models.PrimaryKeyConstraint)
	pk.Columns = []string{"id"}
	table.Constraints[pk.Name] = pk

	// Add column
	col := models.InitColumn("id", "users", "dbo")
	col.Type = "int"
	col.Sequence = 1
	table.Columns["id"] = col

	// Write to buffer
	buf := &bytes.Buffer{}
	writer.writer = buf

	err := writer.writePrimaryKey(schema, table)
	assert.NoError(t, err)

	output := buf.String()
	assert.Contains(t, output, "ALTER TABLE [dbo].[users]")
	assert.Contains(t, output, "PRIMARY KEY")
	assert.Contains(t, output, "[id]")
}
|
||||
|
||||
// TestWriteForeignKey tests FOREIGN KEY constraint generation: a fully
// specified FK (referenced schema/table/columns plus ON DELETE CASCADE and
// ON UPDATE NO ACTION) should appear in the generated ALTER TABLE output.
func TestWriteForeignKey(t *testing.T) {
	writer := NewWriter(&writers.WriterOptions{})

	schema := models.InitSchema("dbo")
	table := models.InitTable("orders", "dbo")

	// Add foreign key constraint
	fk := models.InitConstraint("FK_orders_users", models.ForeignKeyConstraint)
	fk.Columns = []string{"user_id"}
	fk.ReferencedSchema = "dbo"
	fk.ReferencedTable = "users"
	fk.ReferencedColumns = []string{"id"}
	fk.OnDelete = "CASCADE"
	fk.OnUpdate = "NO ACTION"
	table.Constraints[fk.Name] = fk

	// Add column
	col := models.InitColumn("user_id", "orders", "dbo")
	col.Type = "int"
	col.Sequence = 1
	table.Columns["user_id"] = col

	// Write to buffer
	buf := &bytes.Buffer{}
	writer.writer = buf

	err := writer.writeForeignKeys(schema, table)
	assert.NoError(t, err)

	output := buf.String()
	assert.Contains(t, output, "ALTER TABLE [dbo].[orders]")
	assert.Contains(t, output, "FK_orders_users")
	assert.Contains(t, output, "FOREIGN KEY")
	assert.Contains(t, output, "CASCADE")
	assert.Contains(t, output, "NO ACTION")
}
|
||||
|
||||
// TestWriteComments tests extended property generation for comments: a table
// description and a column description should both produce
// sp_addextendedproperty calls carrying the MS_Description property.
func TestWriteComments(t *testing.T) {
	writer := NewWriter(&writers.WriterOptions{})

	schema := models.InitSchema("dbo")
	table := models.InitTable("users", "dbo")
	table.Description = "User accounts table"

	col := models.InitColumn("id", "users", "dbo")
	col.Type = "int"
	col.Description = "Primary key"
	col.Sequence = 1
	table.Columns["id"] = col

	// Write to buffer
	buf := &bytes.Buffer{}
	writer.writer = buf

	err := writer.writeComments(schema, table)
	assert.NoError(t, err)

	output := buf.String()
	assert.Contains(t, output, "sp_addextendedproperty")
	assert.Contains(t, output, "MS_Description")
	assert.Contains(t, output, "User accounts table")
	assert.Contains(t, output, "Primary key")
}
||||
@@ -493,18 +493,19 @@ func (w *Writer) generateColumnDefinition(col *models.Column) string {
|
||||
// Type with length/precision - convert to valid PostgreSQL type
|
||||
baseType := pgsql.ConvertSQLType(col.Type)
|
||||
typeStr := baseType
|
||||
hasExplicitTypeModifier := pgsql.HasExplicitTypeModifier(baseType)
|
||||
|
||||
// Only add size specifiers for types that support them
|
||||
if col.Length > 0 && col.Precision == 0 {
|
||||
if supportsLength(baseType) {
|
||||
if !hasExplicitTypeModifier && col.Length > 0 && col.Precision == 0 {
|
||||
if pgsql.SupportsLength(baseType) {
|
||||
typeStr = fmt.Sprintf("%s(%d)", baseType, col.Length)
|
||||
} else if isTextTypeWithoutLength(baseType) {
|
||||
// Convert text with length to varchar
|
||||
typeStr = fmt.Sprintf("varchar(%d)", col.Length)
|
||||
}
|
||||
// For types that don't support length (integer, bigint, etc.), ignore the length
|
||||
} else if col.Precision > 0 {
|
||||
if supportsPrecision(baseType) {
|
||||
} else if !hasExplicitTypeModifier && col.Precision > 0 {
|
||||
if pgsql.SupportsPrecision(baseType) {
|
||||
if col.Scale > 0 {
|
||||
typeStr = fmt.Sprintf("%s(%d,%d)", baseType, col.Precision, col.Scale)
|
||||
} else {
|
||||
@@ -1268,30 +1269,6 @@ func isTextType(colType string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// supportsLength reports whether a PostgreSQL type accepts a length
// specifier, e.g. varchar(50). Matching is case-insensitive and accepts
// either the bare type name or the name already followed by "(".
func supportsLength(colType string) bool {
	candidate := strings.ToLower(colType)
	for _, known := range []string{"varchar", "character varying", "char", "character", "bit", "bit varying", "varbit"} {
		switch {
		case candidate == known:
			return true
		case strings.HasPrefix(candidate, known+"("):
			return true
		}
	}
	return false
}
|
||||
|
||||
// supportsPrecision reports whether a PostgreSQL type accepts a
// precision/scale specifier, e.g. numeric(10,2) or timestamp(3). Matching is
// case-insensitive and accepts the bare type name or the name already
// followed by "(".
func supportsPrecision(colType string) bool {
	candidate := strings.ToLower(colType)
	for _, known := range []string{"numeric", "decimal", "time", "timestamp", "timestamptz", "timestamp with time zone", "timestamp without time zone", "time with time zone", "time without time zone", "interval"} {
		switch {
		case candidate == known:
			return true
		case strings.HasPrefix(candidate, known+"("):
			return true
		}
	}
	return false
}
|
||||
|
||||
// isTextTypeWithoutLength checks if type is text (which should convert to varchar when length is specified)
|
||||
func isTextTypeWithoutLength(colType string) bool {
|
||||
return strings.EqualFold(colType, "text")
|
||||
|
||||
@@ -426,11 +426,11 @@ func TestWriteAllConstraintTypes(t *testing.T) {
|
||||
|
||||
// Verify all constraint types are present
|
||||
expectedConstraints := map[string]string{
|
||||
"Primary Key": "PRIMARY KEY",
|
||||
"Unique": "ADD CONSTRAINT uq_order_number UNIQUE (order_number)",
|
||||
"Check (total)": "ADD CONSTRAINT ck_total_positive CHECK (total > 0)",
|
||||
"Check (status)": "ADD CONSTRAINT ck_status_valid CHECK (status IN ('pending', 'completed', 'cancelled'))",
|
||||
"Foreign Key": "FOREIGN KEY",
|
||||
"Primary Key": "PRIMARY KEY",
|
||||
"Unique": "ADD CONSTRAINT uq_order_number UNIQUE (order_number)",
|
||||
"Check (total)": "ADD CONSTRAINT ck_total_positive CHECK (total > 0)",
|
||||
"Check (status)": "ADD CONSTRAINT ck_status_valid CHECK (status IN ('pending', 'completed', 'cancelled'))",
|
||||
"Foreign Key": "FOREIGN KEY",
|
||||
}
|
||||
|
||||
for name, expected := range expectedConstraints {
|
||||
@@ -715,11 +715,11 @@ func TestColumnSizeSpecifiers(t *testing.T) {
|
||||
|
||||
// Verify valid patterns ARE present
|
||||
validPatterns := []string{
|
||||
"integer", // without size
|
||||
"bigint", // without size
|
||||
"smallint", // without size
|
||||
"varchar(100)", // text converted to varchar with length
|
||||
"varchar(50)", // varchar with length
|
||||
"integer", // without size
|
||||
"bigint", // without size
|
||||
"smallint", // without size
|
||||
"varchar(100)", // text converted to varchar with length
|
||||
"varchar(50)", // varchar with length
|
||||
"decimal(19,4)", // decimal with precision and scale
|
||||
}
|
||||
for _, pattern := range validPatterns {
|
||||
@@ -729,6 +729,56 @@ func TestColumnSizeSpecifiers(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateColumnDefinition_PreservesExplicitTypeModifiers(t *testing.T) {
|
||||
writer := NewWriter(&writers.WriterOptions{})
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
colType string
|
||||
length int
|
||||
precision int
|
||||
scale int
|
||||
wantType string
|
||||
}{
|
||||
{
|
||||
name: "character varying already includes length",
|
||||
colType: "character varying(50)",
|
||||
length: 50,
|
||||
wantType: "character varying(50)",
|
||||
},
|
||||
{
|
||||
name: "numeric already includes precision",
|
||||
colType: "numeric(10,2)",
|
||||
precision: 10,
|
||||
scale: 2,
|
||||
wantType: "numeric(10,2)",
|
||||
},
|
||||
{
|
||||
name: "custom vector modifier preserved",
|
||||
colType: "vector(1536)",
|
||||
wantType: "vector(1536)",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
col := models.InitColumn("sample", "events", "public")
|
||||
col.Type = tc.colType
|
||||
col.Length = tc.length
|
||||
col.Precision = tc.precision
|
||||
col.Scale = tc.scale
|
||||
|
||||
def := writer.generateColumnDefinition(col)
|
||||
if !strings.Contains(def, " "+tc.wantType+" ") && !strings.HasSuffix(def, " "+tc.wantType) {
|
||||
t.Fatalf("generated definition %q does not contain expected type %q", def, tc.wantType)
|
||||
}
|
||||
if strings.Contains(def, ")(") {
|
||||
t.Fatalf("generated definition %q appears to duplicate modifiers", def)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateAddColumnStatements(t *testing.T) {
|
||||
// Create a test database with tables that have new columns
|
||||
db := models.InitDatabase("testdb")
|
||||
|
||||
@@ -125,9 +125,9 @@ func (w *Writer) generateGenerator() string {
|
||||
func (w *Writer) enumToPrisma(enum *models.Enum) string {
|
||||
var sb strings.Builder
|
||||
|
||||
sb.WriteString(fmt.Sprintf("enum %s {\n", enum.Name))
|
||||
fmt.Fprintf(&sb, "enum %s {\n", enum.Name)
|
||||
for _, value := range enum.Values {
|
||||
sb.WriteString(fmt.Sprintf(" %s\n", value))
|
||||
fmt.Fprintf(&sb, " %s\n", value)
|
||||
}
|
||||
sb.WriteString("}\n")
|
||||
|
||||
@@ -179,7 +179,7 @@ func (w *Writer) identifyJoinTables(schema *models.Schema) map[string]bool {
|
||||
func (w *Writer) tableToPrisma(table *models.Table, schema *models.Schema, joinTables map[string]bool) string {
|
||||
var sb strings.Builder
|
||||
|
||||
sb.WriteString(fmt.Sprintf("model %s {\n", table.Name))
|
||||
fmt.Fprintf(&sb, "model %s {\n", table.Name)
|
||||
|
||||
// Collect columns to write
|
||||
columns := make([]*models.Column, 0, len(table.Columns))
|
||||
@@ -219,11 +219,11 @@ func (w *Writer) columnToField(col *models.Column, table *models.Table, schema *
|
||||
var sb strings.Builder
|
||||
|
||||
// Field name
|
||||
sb.WriteString(fmt.Sprintf(" %s", col.Name))
|
||||
fmt.Fprintf(&sb, " %s", col.Name)
|
||||
|
||||
// Field type
|
||||
prismaType := w.sqlTypeToPrisma(col.Type, schema)
|
||||
sb.WriteString(fmt.Sprintf(" %s", prismaType))
|
||||
fmt.Fprintf(&sb, " %s", prismaType)
|
||||
|
||||
// Optional modifier
|
||||
if !col.NotNull && !col.IsPrimaryKey {
|
||||
@@ -413,7 +413,7 @@ func (w *Writer) generateRelationFields(table *models.Table, schema *models.Sche
|
||||
relationName = relationName[:len(relationName)-1]
|
||||
}
|
||||
|
||||
sb.WriteString(fmt.Sprintf(" %s %s", strings.ToLower(relationName), relationType))
|
||||
fmt.Fprintf(&sb, " %s %s", strings.ToLower(relationName), relationType)
|
||||
|
||||
if isOptional {
|
||||
sb.WriteString("?")
|
||||
@@ -479,8 +479,8 @@ func (w *Writer) generateInverseRelations(table *models.Table, schema *models.Sc
|
||||
if fk.ReferencedTable != table.Name {
|
||||
// This is the other side
|
||||
otherSide := fk.ReferencedTable
|
||||
sb.WriteString(fmt.Sprintf(" %ss %s[]\n",
|
||||
strings.ToLower(otherSide), otherSide))
|
||||
fmt.Fprintf(&sb, " %ss %s[]\n",
|
||||
strings.ToLower(otherSide), otherSide)
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -497,8 +497,8 @@ func (w *Writer) generateInverseRelations(table *models.Table, schema *models.Sc
|
||||
pluralName += "s"
|
||||
}
|
||||
|
||||
sb.WriteString(fmt.Sprintf(" %s %s[]\n",
|
||||
strings.ToLower(pluralName), otherTable.Name))
|
||||
fmt.Fprintf(&sb, " %s %s[]\n",
|
||||
strings.ToLower(pluralName), otherTable.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -530,20 +530,20 @@ func (w *Writer) generateBlockAttributes(table *models.Table) string {
|
||||
|
||||
if len(pkCols) > 1 {
|
||||
sort.Strings(pkCols)
|
||||
sb.WriteString(fmt.Sprintf(" @@id([%s])\n", strings.Join(pkCols, ", ")))
|
||||
fmt.Fprintf(&sb, " @@id([%s])\n", strings.Join(pkCols, ", "))
|
||||
}
|
||||
|
||||
// @@unique for multi-column unique constraints
|
||||
for _, constraint := range table.Constraints {
|
||||
if constraint.Type == models.UniqueConstraint && len(constraint.Columns) > 1 {
|
||||
sb.WriteString(fmt.Sprintf(" @@unique([%s])\n", strings.Join(constraint.Columns, ", ")))
|
||||
fmt.Fprintf(&sb, " @@unique([%s])\n", strings.Join(constraint.Columns, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
// @@index for indexes
|
||||
for _, index := range table.Indexes {
|
||||
if !index.Unique { // Unique indexes are handled by @@unique
|
||||
sb.WriteString(fmt.Sprintf(" @@index([%s])\n", strings.Join(index.Columns, ", ")))
|
||||
fmt.Fprintf(&sb, " @@index([%s])\n", strings.Join(index.Columns, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
215
pkg/writers/sqlite/README.md
Normal file
215
pkg/writers/sqlite/README.md
Normal file
@@ -0,0 +1,215 @@
|
||||
# SQLite Writer
|
||||
|
||||
SQLite DDL (Data Definition Language) writer for RelSpec. Converts database schemas to SQLite-compatible SQL statements.
|
||||
|
||||
## Features
|
||||
|
||||
- **Automatic Schema Flattening** - SQLite doesn't support PostgreSQL-style schemas, so table names are automatically flattened (e.g., `public.users` → `public_users`)
|
||||
- **Type Mapping** - Converts PostgreSQL data types to SQLite type affinities (TEXT, INTEGER, REAL, NUMERIC, BLOB)
|
||||
- **Auto-Increment Detection** - Automatically converts SERIAL types and auto-increment columns to `INTEGER PRIMARY KEY AUTOINCREMENT`
|
||||
- **Function Translation** - Converts PostgreSQL functions to SQLite equivalents (e.g., `now()` → `CURRENT_TIMESTAMP`)
|
||||
- **Boolean Handling** - Maps boolean values to INTEGER (true=1, false=0)
|
||||
- **Constraint Generation** - Creates indexes, unique constraints, and documents foreign keys
|
||||
- **Identifier Quoting** - Properly quotes identifiers using double quotes
|
||||
|
||||
## Usage
|
||||
|
||||
### Convert PostgreSQL to SQLite
|
||||
|
||||
```bash
|
||||
relspec convert --from pgsql --from-conn "postgres://user:pass@localhost/mydb" \
|
||||
--to sqlite --to-path schema.sql
|
||||
```
|
||||
|
||||
### Convert DBML to SQLite
|
||||
|
||||
```bash
|
||||
relspec convert --from dbml --from-path schema.dbml \
|
||||
--to sqlite --to-path schema.sql
|
||||
```
|
||||
|
||||
### Multi-Schema Databases
|
||||
|
||||
SQLite doesn't support schemas, so multi-schema databases are automatically flattened:
|
||||
|
||||
```bash
|
||||
# Input has auth.users and public.posts
|
||||
# Output will have auth_users and public_posts
|
||||
relspec convert --from json --from-path multi_schema.json \
|
||||
--to sqlite --to-path flattened.sql
|
||||
```
|
||||
|
||||
## Type Mapping
|
||||
|
||||
| PostgreSQL Type | SQLite Affinity | Examples |
|
||||
|----------------|-----------------|----------|
|
||||
| TEXT | TEXT | varchar, text, char, citext, uuid, timestamp, json |
|
||||
| INTEGER | INTEGER | int, integer, smallint, bigint, serial, boolean |
|
||||
| REAL | REAL | real, float, double precision |
|
||||
| NUMERIC | NUMERIC | numeric, decimal |
|
||||
| BLOB | BLOB | bytea, blob |
|
||||
|
||||
## Auto-Increment Handling
|
||||
|
||||
Columns are converted to `INTEGER PRIMARY KEY AUTOINCREMENT` when they meet these criteria:
|
||||
- Marked as primary key
|
||||
- Integer type
|
||||
- Have `AutoIncrement` flag set, OR
|
||||
- Type contains "serial", OR
|
||||
- Default value contains "nextval"
|
||||
|
||||
**Example:**
|
||||
|
||||
```sql
|
||||
-- Input (PostgreSQL)
|
||||
CREATE TABLE users (
|
||||
id SERIAL PRIMARY KEY,
|
||||
name VARCHAR(100)
|
||||
);
|
||||
|
||||
-- Output (SQLite)
|
||||
CREATE TABLE "users" (
|
||||
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
"name" TEXT
|
||||
);
|
||||
```
|
||||
|
||||
## Default Value Translation
|
||||
|
||||
| PostgreSQL | SQLite | Notes |
|
||||
|-----------|--------|-------|
|
||||
| `now()`, `CURRENT_TIMESTAMP` | `CURRENT_TIMESTAMP` | Timestamp functions |
|
||||
| `CURRENT_DATE` | `CURRENT_DATE` | Date function |
|
||||
| `CURRENT_TIME` | `CURRENT_TIME` | Time function |
|
||||
| `true`, `false` | `1`, `0` | Boolean values |
|
||||
| `gen_random_uuid()` | *(removed)* | SQLite has no built-in UUID |
|
||||
| `nextval(...)` | *(removed)* | Handled by AUTOINCREMENT |
|
||||
|
||||
## Foreign Keys
|
||||
|
||||
Foreign keys are generated as commented-out ALTER TABLE statements for reference:
|
||||
|
||||
```sql
|
||||
-- Foreign key: fk_posts_user_id
|
||||
-- ALTER TABLE "posts" ADD CONSTRAINT "posts_fk_posts_user_id"
|
||||
-- FOREIGN KEY ("user_id")
|
||||
-- REFERENCES "users" ("id");
|
||||
-- Note: Foreign keys should be defined in CREATE TABLE for better SQLite compatibility
|
||||
```
|
||||
|
||||
For production use, define foreign keys directly in the CREATE TABLE statement or execute the ALTER TABLE commands after creating all tables.
|
||||
|
||||
## Constraints
|
||||
|
||||
- **Primary Keys**: Inline for auto-increment columns, separate constraint for composite keys
|
||||
- **Unique Constraints**: Converted to `CREATE UNIQUE INDEX` statements
|
||||
- **Check Constraints**: Generated as comments (should be added to CREATE TABLE manually)
|
||||
- **Indexes**: Generated without PostgreSQL-specific features (no GIN, GiST, operator classes)
|
||||
|
||||
## Output Structure
|
||||
|
||||
Generated SQL follows this order:
|
||||
|
||||
1. Header comments
|
||||
2. `PRAGMA foreign_keys = ON;`
|
||||
3. CREATE TABLE statements (sorted by schema, then table)
|
||||
4. CREATE INDEX statements
|
||||
5. CREATE UNIQUE INDEX statements (for unique constraints)
|
||||
6. Check constraint comments
|
||||
7. Foreign key comments
|
||||
|
||||
## Example
|
||||
|
||||
**Input (multi-schema PostgreSQL):**
|
||||
|
||||
```sql
|
||||
CREATE SCHEMA auth;
|
||||
CREATE TABLE auth.users (
|
||||
id SERIAL PRIMARY KEY,
|
||||
username VARCHAR(50) UNIQUE NOT NULL,
|
||||
created_at TIMESTAMP DEFAULT now()
|
||||
);
|
||||
|
||||
CREATE SCHEMA public;
|
||||
CREATE TABLE public.posts (
|
||||
id SERIAL PRIMARY KEY,
|
||||
user_id INTEGER REFERENCES auth.users(id),
|
||||
title VARCHAR(200) NOT NULL,
|
||||
published BOOLEAN DEFAULT false
|
||||
);
|
||||
```
|
||||
|
||||
**Output (SQLite with flattened schemas):**
|
||||
|
||||
```sql
|
||||
-- SQLite Database Schema
|
||||
-- Database: mydb
|
||||
-- Generated by RelSpec
|
||||
-- Note: Schema names have been flattened (e.g., public.users -> public_users)
|
||||
|
||||
-- Enable foreign key constraints
|
||||
PRAGMA foreign_keys = ON;
|
||||
|
||||
-- Schema: auth (flattened into table names)
|
||||
|
||||
CREATE TABLE "auth_users" (
|
||||
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
"username" TEXT NOT NULL,
|
||||
"created_at" TEXT DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX "auth_users_users_username_key" ON "auth_users" ("username");
|
||||
|
||||
-- Schema: public (flattened into table names)
|
||||
|
||||
CREATE TABLE "public_posts" (
|
||||
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
"user_id" INTEGER NOT NULL,
|
||||
"title" TEXT NOT NULL,
|
||||
"published" INTEGER DEFAULT 0
|
||||
);
|
||||
|
||||
-- Foreign key: posts_user_id_fkey
|
||||
-- ALTER TABLE "public_posts" ADD CONSTRAINT "public_posts_posts_user_id_fkey"
|
||||
-- FOREIGN KEY ("user_id")
|
||||
-- REFERENCES "auth_users" ("id");
|
||||
-- Note: Foreign keys should be defined in CREATE TABLE for better SQLite compatibility
|
||||
```
|
||||
|
||||
## Programmatic Usage
|
||||
|
||||
```go
|
||||
import (
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/writers/sqlite"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Create writer (automatically enables schema flattening)
|
||||
writer := sqlite.NewWriter(&writers.WriterOptions{
|
||||
OutputPath: "schema.sql",
|
||||
})
|
||||
|
||||
// Write database schema
|
||||
db := &models.Database{
|
||||
Name: "mydb",
|
||||
Schemas: []*models.Schema{
|
||||
// ... your schema data
|
||||
},
|
||||
}
|
||||
|
||||
err := writer.WriteDatabase(db)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- Schema flattening is **always enabled** for SQLite output (cannot be disabled)
|
||||
- Constraint and index names are prefixed with the flattened table name to avoid collisions
|
||||
- Generated SQL is compatible with SQLite 3.x
|
||||
- Foreign key constraints require `PRAGMA foreign_keys = ON;` to be enforced
|
||||
- For complex schemas, review and test the generated SQL before use in production
|
||||
89
pkg/writers/sqlite/datatypes.go
Normal file
89
pkg/writers/sqlite/datatypes.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// SQLite storage classes (type affinities) emitted by the writer.
const (
	TypeText    = "TEXT"
	TypeInteger = "INTEGER"
	TypeReal    = "REAL"
	TypeNumeric = "NUMERIC"
	TypeBlob    = "BLOB"
)

// sqliteAffinity maps a normalized PostgreSQL base type to its non-TEXT
// SQLite affinity. Any type absent from this table (including all the
// textual/temporal/JSON types and anything unknown) maps to TEXT.
var sqliteAffinity = map[string]string{
	// INTEGER affinity
	"int": TypeInteger, "int2": TypeInteger, "int4": TypeInteger,
	"int8": TypeInteger, "integer": TypeInteger, "smallint": TypeInteger,
	"bigint": TypeInteger, "serial": TypeInteger, "smallserial": TypeInteger,
	"bigserial": TypeInteger, "boolean": TypeInteger, "bool": TypeInteger,

	// REAL affinity
	"real": TypeReal, "float": TypeReal, "float4": TypeReal,
	"float8": TypeReal, "double precision": TypeReal,

	// NUMERIC affinity
	"numeric": TypeNumeric, "decimal": TypeNumeric, "money": TypeNumeric,

	// BLOB affinity
	"bytea": TypeBlob, "blob": TypeBlob,
}

// normalizePGType lowercases a PostgreSQL type name and strips array
// notation ("[]") and precision/scale suffixes ("(n)", "(n,m)") so the
// bare base type can be looked up.
func normalizePGType(pgType string) string {
	base := strings.ToLower(strings.TrimSpace(pgType))
	base = strings.TrimSuffix(base, "[]")
	if p := strings.IndexByte(base, '('); p >= 0 {
		base = base[:p]
	}
	return base
}

// MapPostgreSQLType maps a PostgreSQL data type to the closest SQLite
// type affinity. Unrecognized types default to TEXT.
func MapPostgreSQLType(pgType string) string {
	if affinity, ok := sqliteAffinity[normalizePGType(pgType)]; ok {
		return affinity
	}
	return TypeText
}
|
||||
|
||||
// IsIntegerType reports whether a PostgreSQL column type maps to an
// integer column in SQLite. Boolean types are deliberately excluded even
// though they share INTEGER affinity.
func IsIntegerType(colType string) bool {
	base := strings.ToLower(strings.TrimSpace(colType))
	base = strings.TrimSuffix(base, "[]")
	if p := strings.IndexByte(base, '('); p >= 0 {
		base = base[:p]
	}
	integerTypes := []string{
		"int", "int2", "int4", "int8", "integer", "smallint", "bigint",
		"serial", "smallserial", "bigserial",
	}
	for _, candidate := range integerTypes {
		if base == candidate {
			return true
		}
	}
	return false
}
|
||||
|
||||
// MapBooleanValue converts a PostgreSQL boolean literal ("true", "f",
// "yes", …) to SQLite's integer form "1"/"0". Values that are not
// recognized as booleans are returned unchanged.
func MapBooleanValue(value string) string {
	switch strings.ToLower(strings.TrimSpace(value)) {
	case "true", "t", "yes", "y", "1":
		return "1"
	case "false", "f", "no", "n", "0":
		return "0"
	}
	return value
}
|
||||
146
pkg/writers/sqlite/template_functions.go
Normal file
146
pkg/writers/sqlite/template_functions.go
Normal file
@@ -0,0 +1,146 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||
)
|
||||
|
||||
// GetTemplateFuncs returns template functions for SQLite SQL generation
|
||||
func GetTemplateFuncs(opts *writers.WriterOptions) template.FuncMap {
|
||||
return template.FuncMap{
|
||||
"quote_ident": QuoteIdentifier,
|
||||
"map_type": MapPostgreSQLType,
|
||||
"is_autoincrement": IsAutoIncrementCandidate,
|
||||
"qualified_table_name": func(schema, table string) string {
|
||||
return writers.QualifiedTableName(schema, table, opts.FlattenSchema)
|
||||
},
|
||||
"format_default": FormatDefault,
|
||||
"format_constraint_name": func(schema, table, constraint string) string {
|
||||
return FormatConstraintName(schema, table, constraint, opts)
|
||||
},
|
||||
"join": strings.Join,
|
||||
"lower": strings.ToLower,
|
||||
"upper": strings.ToUpper,
|
||||
}
|
||||
}
|
||||
|
||||
// QuoteIdentifier wraps an identifier in double quotes for SQLite,
// doubling any embedded double quotes so the result stays well-formed.
func QuoteIdentifier(name string) string {
	return `"` + strings.ReplaceAll(name, `"`, `""`) + `"`
}
|
||||
|
||||
// IsAutoIncrementCandidate checks if a column should use AUTOINCREMENT
|
||||
func IsAutoIncrementCandidate(col *models.Column) bool {
|
||||
// Must be a primary key
|
||||
if !col.IsPrimaryKey {
|
||||
return false
|
||||
}
|
||||
|
||||
// Must be an integer type
|
||||
if !IsIntegerType(col.Type) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check AutoIncrement field
|
||||
if col.AutoIncrement {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check if default suggests auto-increment
|
||||
if col.Default != nil {
|
||||
defaultStr, ok := col.Default.(string)
|
||||
if ok {
|
||||
defaultLower := strings.ToLower(defaultStr)
|
||||
if strings.Contains(defaultLower, "nextval") ||
|
||||
strings.Contains(defaultLower, "autoincrement") ||
|
||||
strings.Contains(defaultLower, "auto_increment") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Serial types are auto-increment
|
||||
typeLower := strings.ToLower(col.Type)
|
||||
return strings.Contains(typeLower, "serial")
|
||||
}
|
||||
|
||||
// FormatDefault formats a column default value for SQLite DDL.
//
// It returns "" when there is no usable default: the column has no
// default, the default is handled by AUTOINCREMENT, or the default is a
// UUID-generation expression SQLite cannot evaluate. Otherwise it
// translates PostgreSQL timestamp/date/time functions to their SQLite
// keywords, maps boolean literals on boolean columns to 0/1, and strips
// common PostgreSQL type casts.
//
// NOTE: the substring checks below are order-dependent — "current_timestamp"
// must be tested before "current_time", since the latter is a prefix of
// the former. Do not reorder.
func FormatDefault(col *models.Column) string {
	if col.Default == nil {
		return ""
	}

	// Skip auto-increment defaults (handled by AUTOINCREMENT keyword)
	if IsAutoIncrementCandidate(col) {
		return ""
	}

	// Convert to string; Default is typed `any` in the model.
	defaultStr, ok := col.Default.(string)
	if !ok {
		// If not a string, convert to string representation
		defaultStr = fmt.Sprintf("%v", col.Default)
	}

	if defaultStr == "" {
		return ""
	}

	// Convert PostgreSQL-specific functions to SQLite equivalents.
	// Matching is done case-insensitively on a lowered copy; the original
	// string is preserved for the fall-through return.
	defaultLower := strings.ToLower(defaultStr)

	// Current timestamp functions (must precede the current_time check)
	if strings.Contains(defaultLower, "current_timestamp") ||
		strings.Contains(defaultLower, "now()") {
		return "CURRENT_TIMESTAMP"
	}

	// Current date
	if strings.Contains(defaultLower, "current_date") {
		return "CURRENT_DATE"
	}

	// Current time
	if strings.Contains(defaultLower, "current_time") {
		return "CURRENT_TIME"
	}

	// Boolean values: booleans get INTEGER affinity in SQLite, so map
	// true/false literals to 1/0 on boolean-typed columns only.
	sqliteType := MapPostgreSQLType(col.Type)
	if sqliteType == TypeInteger {
		typeLower := strings.ToLower(col.Type)
		if strings.Contains(typeLower, "bool") {
			return MapBooleanValue(defaultStr)
		}
	}

	// UUID generation - SQLite doesn't have built-in UUID, so the default
	// is dropped entirely rather than emitted as an invalid expression.
	if strings.Contains(defaultLower, "uuid") || strings.Contains(defaultLower, "gen_random_uuid") {
		return "" // Remove UUID defaults, users must handle this
	}

	// Remove PostgreSQL-specific casting (e.g. `'x'::text`).
	// NOTE(review): only these four cast suffixes are stripped; others
	// such as ::varchar or ::numeric pass through unchanged — confirm
	// whether that is intended.
	defaultStr = strings.ReplaceAll(defaultStr, "::text", "")
	defaultStr = strings.ReplaceAll(defaultStr, "::integer", "")
	defaultStr = strings.ReplaceAll(defaultStr, "::bigint", "")
	defaultStr = strings.ReplaceAll(defaultStr, "::boolean", "")

	return defaultStr
}
|
||||
|
||||
// FormatConstraintName formats a constraint name with table prefix if flattening
|
||||
func FormatConstraintName(schema, table, constraint string, opts *writers.WriterOptions) string {
|
||||
if opts.FlattenSchema && schema != "" {
|
||||
// Prefix constraint with flattened table name
|
||||
flatTable := writers.QualifiedTableName(schema, table, opts.FlattenSchema)
|
||||
return fmt.Sprintf("%s_%s", flatTable, constraint)
|
||||
}
|
||||
return constraint
|
||||
}
|
||||
174
pkg/writers/sqlite/templates.go
Normal file
174
pkg/writers/sqlite/templates.go
Normal file
@@ -0,0 +1,174 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"embed"
|
||||
"fmt"
|
||||
"text/template"
|
||||
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/models"
|
||||
"git.warky.dev/wdevs/relspecgo/pkg/writers"
|
||||
)
|
||||
|
||||
//go:embed templates/*.tmpl
|
||||
var templateFS embed.FS
|
||||
|
||||
// TemplateExecutor manages and executes SQLite SQL templates.
// Construct it with NewTemplateExecutor; the zero value is not usable.
type TemplateExecutor struct {
	// templates holds the parsed set of all embedded templates/*.tmpl files.
	templates *template.Template
	// options are the writer settings captured at construction and made
	// available to template helper functions.
	options *writers.WriterOptions
}
|
||||
|
||||
// NewTemplateExecutor creates a new template executor for SQLite
|
||||
func NewTemplateExecutor(opts *writers.WriterOptions) (*TemplateExecutor, error) {
|
||||
// Create template with SQLite-specific functions
|
||||
funcMap := GetTemplateFuncs(opts)
|
||||
|
||||
tmpl, err := template.New("").Funcs(funcMap).ParseFS(templateFS, "templates/*.tmpl")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse templates: %w", err)
|
||||
}
|
||||
|
||||
return &TemplateExecutor{
|
||||
templates: tmpl,
|
||||
options: opts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Template data structures passed to the embedded *.tmpl files.

// TableTemplateData contains data for the create_table template.
type TableTemplateData struct {
	Schema     string             // originating schema name (may be flattened into the table name)
	Name       string             // table name
	Columns    []*models.Column   // column definitions in emission order
	PrimaryKey *models.Constraint // primary key constraint, or nil if the table has none
}

// IndexTemplateData contains data for the create_index template.
type IndexTemplateData struct {
	Schema  string   // originating schema name
	Table   string   // table the index belongs to
	Name    string   // index name
	Columns []string // indexed column names, in order
}

// ConstraintTemplateData contains data for the constraint templates
// (unique, check, and foreign key). Only the fields relevant to a given
// constraint kind are populated.
type ConstraintTemplateData struct {
	Schema         string   // originating schema name
	Table          string   // table the constraint belongs to
	Name           string   // constraint name
	Columns        []string // constrained column names
	Expression     string   // check expression (check constraints only)
	ForeignSchema  string   // referenced schema (foreign keys only)
	ForeignTable   string   // referenced table (foreign keys only)
	ForeignColumns []string // referenced columns (foreign keys only)
	OnDelete       string   // ON DELETE action (foreign keys only)
	OnUpdate       string   // ON UPDATE action (foreign keys only)
}
|
||||
|
||||
// Execute methods
|
||||
|
||||
// ExecutePragmaForeignKeys executes the pragma foreign keys template
|
||||
func (te *TemplateExecutor) ExecutePragmaForeignKeys() (string, error) {
|
||||
var buf bytes.Buffer
|
||||
err := te.templates.ExecuteTemplate(&buf, "pragma_foreign_keys.tmpl", nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to execute pragma_foreign_keys template: %w", err)
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// ExecuteCreateTable executes the create table template
|
||||
func (te *TemplateExecutor) ExecuteCreateTable(data TableTemplateData) (string, error) {
|
||||
var buf bytes.Buffer
|
||||
err := te.templates.ExecuteTemplate(&buf, "create_table.tmpl", data)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to execute create_table template: %w", err)
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// ExecuteCreateIndex executes the create index template
|
||||
func (te *TemplateExecutor) ExecuteCreateIndex(data IndexTemplateData) (string, error) {
|
||||
var buf bytes.Buffer
|
||||
err := te.templates.ExecuteTemplate(&buf, "create_index.tmpl", data)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to execute create_index template: %w", err)
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// ExecuteCreateUniqueConstraint executes the create unique constraint template
|
||||
func (te *TemplateExecutor) ExecuteCreateUniqueConstraint(data ConstraintTemplateData) (string, error) {
|
||||
var buf bytes.Buffer
|
||||
err := te.templates.ExecuteTemplate(&buf, "create_unique_constraint.tmpl", data)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to execute create_unique_constraint template: %w", err)
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// ExecuteCreateCheckConstraint executes the create check constraint template
|
||||
func (te *TemplateExecutor) ExecuteCreateCheckConstraint(data ConstraintTemplateData) (string, error) {
|
||||
var buf bytes.Buffer
|
||||
err := te.templates.ExecuteTemplate(&buf, "create_check_constraint.tmpl", data)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to execute create_check_constraint template: %w", err)
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// ExecuteCreateForeignKey executes the create foreign key template
|
||||
func (te *TemplateExecutor) ExecuteCreateForeignKey(data ConstraintTemplateData) (string, error) {
|
||||
var buf bytes.Buffer
|
||||
err := te.templates.ExecuteTemplate(&buf, "create_foreign_key.tmpl", data)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to execute create_foreign_key template: %w", err)
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// Helper functions to build template data from models

// BuildTableTemplateData builds TableTemplateData from a models.Table.
// If the table carries no explicit primary key constraint, a synthetic
// one named "pk_<table>" is assembled from the columns flagged
// IsPrimaryKey.
func BuildTableTemplateData(schema string, table *models.Table) TableTemplateData {
	// Copy columns into a slice for the template.
	// NOTE(review): nothing here sorts — emission order is whatever
	// ranging over table.Columns yields; if Columns is a map this order
	// is nondeterministic. Confirm whether a defined order is required.
	columns := make([]*models.Column, 0, len(table.Columns))
	for _, col := range table.Columns {
		columns = append(columns, col)
	}

	// Prefer an explicit primary key constraint if the table has one.
	var pk *models.Constraint
	for _, constraint := range table.Constraints {
		if constraint.Type == models.PrimaryKeyConstraint {
			pk = constraint
			break
		}
	}

	// If no explicit primary key constraint, build one from columns with IsPrimaryKey=true
	if pk == nil {
		pkCols := []string{}
		for _, col := range table.Columns {
			if col.IsPrimaryKey {
				pkCols = append(pkCols, col.Name)
			}
		}
		if len(pkCols) > 0 {
			pk = &models.Constraint{
				Name:    "pk_" + table.Name,
				Type:    models.PrimaryKeyConstraint,
				Columns: pkCols,
			}
		}
	}

	return TableTemplateData{
		Schema:     schema,
		Name:       table.Name,
		Columns:    columns,
		PrimaryKey: pk,
	}
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user