Added more examples and pgsql reader
Makefile (33 lines changed)
@@ -1,4 +1,4 @@
.PHONY: all build test lint coverage clean install help
.PHONY: all build test lint coverage clean install help docker-up docker-down docker-test docker-test-integration

# Binary name
BINARY_NAME=relspec
@@ -58,5 +58,36 @@ deps: ## Download dependencies
	$(GOMOD) tidy
	@echo "Dependencies updated"

docker-up: ## Start PostgreSQL test database
	@echo "Starting PostgreSQL test database..."
	@if command -v docker-compose > /dev/null 2>&1; then \
		docker-compose up -d postgres; \
	else \
		docker compose up -d postgres; \
	fi
	@echo "Waiting for PostgreSQL to be ready..."
	@sleep 3
	@echo "PostgreSQL is running on port 5433"
	@echo "Connection: postgres://relspec:relspec_test_password@localhost:5433/relspec_test"

docker-down: ## Stop PostgreSQL test database
	@echo "Stopping PostgreSQL test database..."
	@if command -v docker-compose > /dev/null 2>&1; then \
		docker-compose down; \
	else \
		docker compose down; \
	fi
	@echo "PostgreSQL stopped"

docker-test: ## Run PostgreSQL integration tests with Docker
	@./tests/postgres/run_tests.sh

docker-test-integration: docker-up ## Start DB and run integration tests
	@echo "Running integration tests..."
	@sleep 2
	@RELSPEC_TEST_PG_CONN="postgres://relspec:relspec_test_password@localhost:5433/relspec_test" \
		$(GOTEST) -v ./pkg/readers/pgsql/ -count=1 || (make docker-down && exit 1)
	@make docker-down

help: ## Display this help screen
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'
docker-compose.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
version: '3.8'

services:
  postgres:
    image: postgres:16-alpine
    container_name: relspec-test-postgres
    environment:
      POSTGRES_USER: relspec
      POSTGRES_PASSWORD: relspec_test_password
      POSTGRES_DB: relspec_test
    ports:
      - "5433:5432" # Using 5433 to avoid conflicts with local PostgreSQL
    volumes:
      - ./tests/postgres/init.sql:/docker-entrypoint-initdb.d/init.sql
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U relspec -d relspec_test"]
      interval: 5s
      timeout: 5s
      retries: 5

volumes:
  postgres_data:
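Once the healthcheck passes, a quick sanity check from any SQL client is to confirm you are connected to the test database as the test role. The queries below are illustrative, not part of the commit; the expected values follow from the environment block above.

-- Expect relspec_test / relspec, per POSTGRES_DB and POSTGRES_USER above.
select current_database(), current_user;
-- List non-system schemas, e.g. to see what tests/postgres/init.sql set up.
select schema_name from information_schema.schemata
where schema_name not like 'pg_%' and schema_name <> 'information_schema';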
examples/pgsql_meta_upgrade/audit/f_audit_build.sql (new file, 715 lines)
@@ -0,0 +1,715 @@
----select dropall('f_audit_build');
create or replace function meta.f_audit_build(
p_mode integer = 0,
p_schema text default ''
)
returns table(
scriptpriority integer
,scripttype citext
,objectname citext
,scriptcode citext
,schemaname citext
)
language plpgsql volatile
as
$$
---p_mode=
--0:all
--1:only functions
--2:only creates
--3:only drops
declare
--Error Handling--
m_funcname text = 'f_audit_build';
m_errmsg text;
m_errcontext text;
m_errdetail text;
m_errhint text;
m_errstate text;
m_retval integer;
--Error Handling--

m_sql citext;
m_sql2 citext;
m_auditcol citext;
m_dkeyfield citext;
m_pkfield citext;
m_skipcoreschema citext[];
r_record record;
r_recsub record;
begin
--columntype:
-- 0=Normal Audited
--, 1=Primary Key
--, 2=Primary Key Audited
--, 3=UserColumn
--, 4=Normal Encrypted Audited
--, 5 = Parent Table Primary Key for lookup (Must give Foreign Table)
--, 6 = Parent Table Primary Key for delete (Must give Foreign Table)
--, 9=View only, used for custom audit. e.g. pplan
--specialtype:
-- 0,null = Leave alone.
-- 1=Convert int date to sql date.
-- 2=Convert int time to sql time.
-- 3=Status Field --Note: when SpecialType is 3 (Status Field), the ForeignTable column must have the name of the search sent to F_GetDescription_Code
-- 4=ForeignTable
-- 5=ForeignTable, tableprefix
-- 6 = Blob Column, will compare with size only
-- 7 = Blob Column, will compare with hash only
-- 8 = Hide
m_skipcoreschema = array[]::citext[];
SET LOCAL statement_timeout = '90s';

drop table if exists tmp_f_audittables;
CREATE TEMP TABLE tmp_f_audittables(
tablename citext
,schemaname citext
,tableprefix citext
,columnname citext
,columndesc citext
,columntype integer
,datatype citext
,specialtype integer
,foreigntable citext
,foreigncolumn citext
,foreignpk citext
,addversion integer
,savetoschema citext
,PRIMARY KEY(columnname, schemaname,tablename, columntype)
);
--CREATE INDEX idx_tmp_f_audittables ON tmp_f_audittables (tablename, columnname);

if exists (
select r.routine_name
from information_schema.routines r
where r.routine_name::citext = 'f_getauditinfo'
and r.specific_schema = 'public'
) and exists (
select 1
from information_schema.columns c
where c.table_name = 't_adproclientcreditorl1'
and c.table_schema = 'public'
)
then
m_skipcoreschema = array_append(m_skipcoreschema,'public'::citext);

insert into tmp_f_audittables(tablename
,schemaname
,tableprefix
,columnname
,columndesc
,columntype
,datatype
,specialtype
,foreigntable
,foreigncolumn
,foreignpk
,savetoschema
)
select distinct on (r.columnname, r.tablename, r.columntype)
r.tablename
,'public' as schemaname
,r.tableprefix
,r.columnname
,r.columndescription
,r.columntype
,r.datatype
,r.specialtype
,r.foreigntable
,r.foreigncolumn
,r.foreignpk
,'public' as savetoschema
from meta.migration_table t2
inner join public.f_getauditinfo(t2.tablename) r on t2.ismodel
and t2.tablename = r.tablename
and t2.schemaname = 'public'
;

end if;

if exists (
select r.routine_name
from information_schema.routines r
where r.routine_name::citext = 'f_getauditinfo'
and r.specific_schema = 'meta'
)
then

insert into tmp_f_audittables(tablename
,schemaname
,tableprefix
,columnname
,columndesc
,columntype
,datatype
,specialtype
,foreigntable
,foreigncolumn
,foreignpk
,savetoschema
)
select distinct on (r.columnname, r.tablename, r.columntype)
r.tablename
,r.schemaname
,r.tableprefix
,r.columnname
,r.columndescription
,r.columntype
,r.datatype
,r.specialtype
,r.foreigntable
,r.foreigncolumn
,r.foreignpk
,case when t2.schemaname = 'public' then 'public' else 'core' end as savetoschema
from meta.migration_table t2
inner join meta.f_getauditinfo(format('%s.%s',t2.schemaname,t2.tablename)) r on t2.ismodel
and t2.tablename = r.tablename
and t2.schemaname = r.schemaname
and coalesce(r.schemaname,'') not in ('meta','')
where coalesce(r.schemaname,'') <> all(m_skipcoreschema)
;

delete from tmp_f_audittables a
where a.tablename in (
select distinct b.tablename
from tmp_f_audittables b
where b.columntype = 9
);

if exists (
select r.routine_name
from information_schema.routines r
where r.routine_name::citext = 'f_getauditversion'
and r.specific_schema = 'meta'
)
then
update tmp_f_audittables at
set addversion = av.mode
from (
select r.tablename
,r.columnname
,r.mode
from meta.f_getauditversion('') r
) av
where at.tablename::citext = av.tablename::citext
and at.columnname::citext = av.columnname::citext
;
end if;

end if;

delete from tmp_f_audittables t
where t.tablename in (
'version', 'upgrades', 'upgradescript', 'eventlog', 'event_notify', 'atevent',
'atdetail', 'procused', 'lastprocrun', 'intface_transmission', 'setting','programmodule'
,'_loginsession','event_notify','bt_enums','authsession'
)
or t.tablename in (select t.tablename from meta.f_upgrade_table() t );

for r_record in (
select distinct on (a.tablename,a.schemaname ,a.tableprefix)
nv(a.tablename)::citext as tablename
,nv(a.schemaname)::citext as schemaname
,nv(a.tableprefix)::citext as tableprefix
,COALESCE((select 1
from tmp_f_audittables a2
where a2.tablename = a.tablename
and a2.addversion = 2
limit 1
),0) as addversion
,nv(a.savetoschema,'core') as savetoschema
from tmp_f_audittables a
inner join information_schema.tables t on lower(t.table_name) = lower(a.tablename)
and lower(t.table_schema) = lower(a.schemaname)
)
loop
m_sql = '';
m_sql2 = '';
m_auditcol = 'if ( ';
m_dkeyfield = '';
m_pkfield = '';

--raise notice 'Tbl: %', r_record.tableprefix;

for r_recsub in (
select nv(a.columnname) as columnname
,nv(a.columntype) as columntype
,nv(a.datatype) as datatype
,nv(a.specialtype) as specialtype
,nv(a.foreigntable) as foreigntable
,nv(a.foreigncolumn) as foreigncolumn
,nv(a.foreignpk) as foreignpk
,nv(a.addversion) as addversion
,row_number() over (order by a.tablename, a.columnname) rownr
,r_record.savetoschema as savetoschema
from tmp_f_audittables a
where a.tablename = r_record.tablename
and a.schemaname = r_record.schemaname
and exists (
select 1
from information_schema.columns c
where lower(c.table_name) = lower(a.tablename)
and lower(c.table_schema) = lower(a.schemaname)
and lower(c.column_name) = lower(a.columnname)
)
)
loop
--raise notice 'Col: %' ,r_recsub.columnname;

if r_recsub.columntype in (0,2,4)
then
if r_recsub.datatype = 'jsonb'
then
m_sql = m_sql || chr(13) || chr(10) || ' '
|| 'if (old.'|| r_recsub.columnname ||' IS DISTINCT FROM new.'|| r_recsub.columnname || ') then'
|| chr(13) || chr(10) || ' '
|| case when r_recsub.savetoschema = 'core'
then 'insert into core.atdetail(rid_atevent, datacolumn, changedfrom, changedto)'
else 'insert into public.atdetail(rid_parent, datacolumn, changedfrom, changedto)'
end
|| chr(13) || chr(10)
|| $SS$
select m_atevent, k
, case when r.jsonvalue->k->>'action' = 'insert' then null else r.jsonvalue->k->>'oldvalue' end as changedfrom
, case when r.jsonvalue->k->>'action' = 'delete' then null else r.jsonvalue->k->>'value' end as changedto
from f_json_keydiff(to_jsonb(new.$SS$|| r_recsub.columnname || $SS$), to_jsonb(old.$SS$|| r_recsub.columnname || $SS$),'$SS$|| r_recsub.columnname || $SS$') r(jsonvalue)
cross join jsonb_object_keys(r.jsonvalue) k
;
$SS$
|| chr(13) || chr(10) || ' end if;'
|| chr(13) || chr(10) ;

else
--detail for updates
m_sql = m_sql || chr(13) || chr(10) || ' '
|| 'if (old.'|| r_recsub.columnname ||' IS DISTINCT FROM new.'|| r_recsub.columnname || ') then'
|| chr(13) || chr(10) || ' '
|| case when r_recsub.savetoschema = 'core'
then 'insert into core.atdetail(rid_atevent, datacolumn, changedfrom, changedto)'
else 'insert into public.atdetail(rid_parent, datacolumn, changedfrom, changedto)'
end

|| chr(13) || chr(10) || ' '
|| 'values (m_atevent, '''|| r_recsub.columnname ||''', substr('
|| case --OLD
--TIME
when r_recsub.specialtype = 2 then 'f_inttotm(old.'|| r_recsub.columnname || ')::text'
--desc code lookup
when r_recsub.specialtype = 3 and r_recsub.columntype = 0 and COALESCE(r_recsub.foreigntable,'') <> ''
then 'f_getdesccode(''' || r_recsub.foreigntable || ''', old.'|| r_recsub.columnname || '::text)::text'
--foreign table sub select
when r_recsub.specialtype in (4,9) and r_recsub.columntype = 0 and COALESCE(r_recsub.foreigntable,'') <> ''
then case when r_recsub.foreigncolumn ilike '(%' then '(select ' else '(select ft.' end --Detect sub select.
|| r_recsub.foreigncolumn || ' from ' || r_recsub.foreigntable || ' ft where ft.'|| r_recsub.foreignpk
||' = old.'|| r_recsub.columnname || ' limit 1)::text'
--blobs, size
when r_recsub.specialtype = 6 and r_recsub.columntype in (0,3,4) and r_recsub.datatype = 'bytea'
then 'octet_length(old.' || r_recsub.columnname ||')::text'
--blobs, md5
when r_recsub.specialtype = 7 and r_recsub.columntype in (0,3,4) and r_recsub.datatype = 'bytea'
then '( select h.p_hash from pl_hashbytes(old.' || r_recsub.columnname ||') h limit 1 )::text'
--Hide (All)
when r_recsub.specialtype = 8 or r_recsub.columntype = 4
then '''****************''::text'

--default
else ' old.'|| r_recsub.columnname || '::text'
end
||',1,1000) , substr('
|| case --NEW
--TIME
when r_recsub.specialtype = 2 then 'f_inttotm(new.'|| r_recsub.columnname || ')::text'
--desc code lookup
when r_recsub.specialtype = 3 and r_recsub.columntype = 0 and COALESCE(r_recsub.foreigntable,'') <> ''
then 'f_getdesccode(''' || r_recsub.foreigntable || ''', new.'|| r_recsub.columnname || '::text)::text'
--foreign table sub select
when r_recsub.specialtype = 4 and r_recsub.columntype = 0 and COALESCE(r_recsub.foreigntable,'') <> ''
then '(select ft.' || r_recsub.foreigncolumn || ' from ' || r_recsub.foreigntable || ' ft where ft.'|| r_recsub.foreignpk
||' = new.'|| r_recsub.columnname || ' limit 1)::text'
--foreign table sub select with table prefix
when r_recsub.specialtype = 5 and COALESCE(r_recsub.foreigntable,'') <> ''
then '(select ft.' || r_recsub.foreigncolumn || ' from ' || r_recsub.foreigntable || ' ft where ft.'|| r_recsub.foreignpk
||' = new.'|| r_recsub.columnname || ' limit 1)::text'
--blobs, size
when r_recsub.specialtype = 6 and r_recsub.columntype in (0,3,4) and r_recsub.datatype = 'bytea'
then 'octet_length(new.' || r_recsub.columnname ||')::text'
--blobs, md5
when r_recsub.specialtype = 7 and r_recsub.columntype in (0,3,4) and r_recsub.datatype = 'bytea'
then '( select h.p_hash from pl_hashbytes(new.' || r_recsub.columnname ||') h limit 1 )::text'
--Hide (All)
when r_recsub.specialtype = 8 or r_recsub.columntype = 4
then '''****************''::text'
--default
else ' new.'|| r_recsub.columnname || '::text'
end
||',1,1000));' || chr(13) || chr(10) || ' '
|| case when r_recsub.addversion = 1 then E'\n '
|| case when r_recsub.savetoschema = 'public'
then E' insert into public.atdetail(rid_parent, datacolumn, changedfrom, changedto)
select m_atevent, ''version'', null,(select version from version limit 1) \n
where not exists (
select 1
from public.atdetail ad
where ad.rid_parent = m_atevent
and ad.datacolumn = ''version''
) and not (changedfrom is null and changedto is null)
;\n
'
else E' insert into core.atdetail(rid_atevent, datacolumn, changedfrom, changedto)
select m_atevent, ''version'', null,(select version from version limit 1) \n
where not exists (
select 1
from core.atdetail ad
where ad.rid_atevent = m_atevent
and ad.datacolumn = ''version''
) and not (changedfrom is null and changedto is null)
;\n'
end || E' \n'
else ''
end
|| chr(13) || chr(10) || ' end if;'
|| chr(13) || chr(10) ;

end if;

--detail for deletes
m_sql2 = m_sql2 || chr(13) || chr(10) || ' '
|| case when r_recsub.savetoschema = 'public'
then 'insert into public.atdetail(rid_parent, datacolumn, changedfrom, changedto)'
else 'insert into core.atdetail(rid_atevent, datacolumn, changedfrom, changedto)'
end
|| chr(13) || chr(10) || ' '
|| 'values (m_atevent, '''|| r_recsub.columnname ||''', ' || case
when r_recsub.datatype ilike '%bytea%'
then '( select h.p_hash from pl_hashbytes(old.' || r_recsub.columnname ||') h limit 1 )::text '
when r_recsub.datatype ilike '%int%' or r_recsub.datatype ilike '%numeric%' or r_recsub.datatype ilike '%real%'
then ' old.'|| r_recsub.columnname ||'::text '
else ' substr(old.'|| r_recsub.columnname ||'::text,1,1000) '
end || ', null);'
|| chr(13) || chr(10) ;

m_auditcol = m_auditcol || 'old.'|| r_recsub.columnname ||' IS DISTINCT FROM new.'|| r_recsub.columnname
|| case when r_recsub.rownr % 3 = 0 then chr(13) || chr(10)
else ''
end || ' or ';
end if;

if (r_recsub.columntype in (1,2))
then
m_pkfield = r_recsub.columnname;
end if;

if r_recsub.columntype = 6
then
m_dkeyfield = r_recsub.columnname;
end if;

end loop;

m_auditcol = m_auditcol || ' 1 = 0 ) then';

if nv(p_mode) in (0,3)
then
return query
select r.scriptpriority::integer
,r.scripttype::citext
,r.objectname::citext
,r.scriptcode::citext
,r_record.schemaname::citext
from (
select
340 as scriptpriority
,'drop audit trigger' as scripttype
,'t_audit_'||r_record.tablename as objectname
,'do $C$
begin
SET LOCAL statement_timeout = ''10s'';
DROP TRIGGER IF EXISTS t_audit_' || r_record.tablename
||E'\n ON ' || r_record.schemaname || '.' || r_record.tablename ||';
end;
$C$
' as scriptcode
) r
;
end if;

if nv(m_pkfield) = ''
then
raise notice 'No Primary Key for %. Please fix this in f_getauditinfo.',r_record.tablename;
--perform log_event(m_funcname,format(' as No Primary Key for %s %s. Please fix this in f_getauditinfo.' ,r_record.tablename, r_record.tableprefix),bt_enum('eventlog','error'));
continue;
end if;

if nv(p_mode) in (0,1)
then

return query
select r.scriptpriority::integer
,r.scripttype::citext
,r.objectname::citext
,r.scriptcode::citext
,r_record.schemaname::citext
from (
select 345 as scriptpriority
,'create audit function ' as scripttype
,r_record.schemaname || '.' || 'ft_audit_'||rtrim(r_record.tablename) as objectname
,--------------------------- String ends at $QUOTED$ with comments.
$QUOTED$
CREATE OR REPLACE FUNCTION $QUOTED$ || r_record.schemaname || $QUOTED$.ft_audit_$QUOTED$ || rtrim(r_record.tablename) || $QUOTED$ (
)
RETURNS trigger AS
$body$
DECLARE
--Error Handling--
m_funcname text = 'ft_audit_$QUOTED$ || rtrim(r_record.tablename) || $QUOTED$';
m_errmsg text;
m_errcontext text;
m_errdetail text;
m_errhint text;
m_errstate text;
m_retval integer;
--Error Handling--
r_adtrec record;
m_query text;
m_user citext;
m_atevent integer;
BEGIN
if core.not_triggerable(tg_name, tg_table_schema, tg_table_name,tg_op)
then
return null;
end if;

if TG_TABLE_NAME::citext <> $QUOTED$ || quote_literal(r_record.tablename) || $QUOTED$::citext
then
IF (TG_OP = 'DELETE') THEN
RETURN OLD;
ELSIF (TG_OP = 'UPDATE') THEN
RETURN NEW;
ELSIF (TG_OP = 'INSERT') THEN
RETURN NEW;
END IF;
end if;

m_user = f_getuser();
if m_user::citext in ('noaudit', 'importuser')
or meta.f_audit_check(TG_TABLE_NAME::text, TG_OP::text, m_user::citext, row_to_json(NEW), row_to_json(OLD))
then
IF (TG_OP = 'DELETE') THEN
RETURN OLD;
ELSIF (TG_OP = 'UPDATE') THEN
RETURN NEW;
ELSIF (TG_OP = 'INSERT') THEN
RETURN NEW;
END IF;
end if;

$QUOTED$ ||
-------------------core--------------
case when r_record.savetoschema = 'core' then $QUOTED$
if TG_OP = 'INSERT'
then

with rows_ins as (
insert into core.atevent (tablename, rid_parent, changeuser, changedate,changetime,actionx)
values('$QUOTED$ || r_record.tablename || $QUOTED$', new.$QUOTED$ || m_pkfield || $QUOTED$, m_user, clock_timestamp()::date, clock_timestamp()::time, 1)
returning rid_atevent
)
select rid_atevent
from rows_ins
into m_atevent;

$QUOTED$ || case
when r_record.addversion = 1 then E'
insert into core.atdetail(rid_atevent, datacolumn, changedfrom, changedto) \n
select m_atevent, $Q2$version$Q2$, null,(select version from version limit 1) \n
where not exists (select 1 from core.atdetail ad where ad.rid_atevent = m_atevent and ad.datacolumn = $Q2$version$Q2$);
' else '' end ||
$QUOTED$

elsif TG_OP = 'UPDATE'
then
$QUOTED$ || m_auditcol || $QUOTED$
with rows_ins as (
insert into core.atevent (tablename, rid_parent, changeuser, changedate,changetime,actionx)
values('$QUOTED$ || r_record.tablename || $QUOTED$', new.$QUOTED$ || m_pkfield || $QUOTED$, m_user, clock_timestamp()::date, clock_timestamp()::time, 2)
returning rid_atevent
)
select rid_atevent
from rows_ins
into m_atevent;

$QUOTED$ || m_sql || $QUOTED$
end if;

elsif TG_OP in ('DELETE', 'TRUNCATE')
then
with rows_ins as (
insert into core.atevent(tablename, rid_parent, changeuser, changedate,changetime,actionx, rid_deletedparent)
values('$QUOTED$ || r_record.tablename || $QUOTED$'
, old.$QUOTED$ || m_pkfield || $QUOTED$
, m_user
, clock_timestamp()::date
, clock_timestamp()::time
, 3
, $QUOTED$ || case when nv(m_dkeyfield) = '' then 'null' else format('old.%s',m_dkeyfield) end || $QUOTED$)
returning rid_atevent
)
select rid_atevent
from rows_ins
into m_atevent;

$QUOTED$
-------------------public--------------
else $QUOTED$
if TG_OP = 'INSERT'
then

with rows_ins as (
insert into public.atevent(tableprefix, rid_parent, changeuser, changedate,changetime,actionx)
values('$QUOTED$ || r_record.tableprefix || $QUOTED$', new.$QUOTED$ || m_pkfield || $QUOTED$, m_user, clock_timestamp()::date, f_tmtoint(clock_timestamp()::time), 1)
returning rid_atevent
)
select rid_atevent
from rows_ins
into m_atevent;

$QUOTED$ || case
when r_record.addversion = 1 then E'
insert into public.atdetail(rid_atevent, datacolumn, changedfrom, changedto) \n
select m_atevent, $Q2$version$Q2$, null,(select version from version limit 1) \n
where not exists (select 1 from public.atdetail ad where ad.rid_atevent = m_atevent and ad.datacolumn = $Q2$version$Q2$);
' else '' end ||
$QUOTED$

elsif TG_OP = 'UPDATE'
then
$QUOTED$ || m_auditcol || $QUOTED$
with rows_ins as (
insert into public.atevent (tableprefix, rid_parent, changeuser, changedate,changetime,actionx)
values('$QUOTED$ || r_record.tableprefix || $QUOTED$', new.$QUOTED$ || m_pkfield || $QUOTED$, m_user, clock_timestamp()::date, f_tmtoint(clock_timestamp()::time), 2)
returning rid_atevent
)
select rid_atevent
from rows_ins
into m_atevent;

$QUOTED$ || m_sql || $QUOTED$
end if;

elsif TG_OP in ('DELETE', 'TRUNCATE')
then
with rows_ins as (
insert into public.atevent (tableprefix, rid_parent, changeuser, changedate,changetime,actionx, rid_deletedparent)
values('$QUOTED$ || r_record.tableprefix || $QUOTED$'
, old.$QUOTED$ || m_pkfield || $QUOTED$
, m_user
, clock_timestamp()::date
, f_tmtoint(clock_timestamp()::time)
, 3
, $QUOTED$ || case when nv(m_dkeyfield) = '' then 'null' else format('old.%s',m_dkeyfield) end || $QUOTED$)
returning rid_atevent
)
select rid_atevent
from rows_ins
into m_atevent;

$QUOTED$ end

|| $QUOTED$

$QUOTED$ || m_sql2 || $QUOTED$

end if;

IF (TG_OP = 'DELETE') THEN
RETURN OLD;
ELSIF (TG_OP = 'UPDATE') THEN
RETURN NEW;
ELSIF (TG_OP = 'INSERT') THEN
RETURN NEW;
END IF;
RETURN NULL; -- result is ignored since this is an AFTER trigger

EXCEPTION
WHEN others THEN
GET STACKED DIAGNOSTICS
m_errmsg = MESSAGE_TEXT
,m_errcontext = PG_EXCEPTION_CONTEXT
,m_errdetail = PG_EXCEPTION_DETAIL
,m_errhint = PG_EXCEPTION_HINT
,m_errstate = RETURNED_SQLSTATE;

m_errmsg := get_err_msg(m_funcname, m_errmsg, m_errcontext, m_errdetail, m_errhint, m_errstate);

RETURN NULL; -- result is ignored since this is an AFTER trigger
END;
$body$
LANGUAGE 'plpgsql'
VOLATILE
CALLED ON NULL INPUT
SECURITY INVOKER
COST 1;
$QUOTED$ as scriptcode
---------------------------------------
) r
;
end if;

if nv(p_mode) in (0,2)
then

return query
select r.scriptpriority::integer
,r.scripttype::citext
,r.objectname::citext
,r.scriptcode::citext
,r_record.schemaname::citext
from (
select distinct on (r_record.tablename)
355 as scriptpriority
,'create audit trigger' as scripttype
,'t_audit_'||r_record.tablename as objectname
,format($A$DO $TT$
BEGIN
IF NOT EXISTS (
SELECT 1
FROM pg_trigger
WHERE tgname = 't_audit_%2$s'
AND tgrelid = '%1$s.%2$s'::regclass
) THEN
-- Create the trigger if it doesn't exist
CREATE TRIGGER t_audit_%2$s
AFTER INSERT OR UPDATE OR DELETE
ON %1$s.%2$s FOR EACH ROW
EXECUTE PROCEDURE %1$s.ft_audit_%2$s();
END IF;
END;
$TT$;
$A$, r_record.schemaname,r_record.tablename) as scriptcode
) r ;
end if;

end loop;

exception
when others then
GET STACKED DIAGNOSTICS
m_errmsg = MESSAGE_TEXT
,m_errcontext = PG_EXCEPTION_CONTEXT
,m_errdetail = PG_EXCEPTION_DETAIL
,m_errhint = PG_EXCEPTION_HINT
,m_errstate = RETURNED_SQLSTATE;

m_errmsg := get_err_msg(m_funcname, m_errmsg, m_errcontext, m_errdetail, m_errhint, m_errstate);
raise '%',m_errmsg;

end;

$$;
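For reference, a hypothetical invocation (not part of this commit) that previews only the generated DROP TRIGGER scripts, using the p_mode values documented at the top of the function:

-- p_mode 3 = only drops; p_mode 0 would also return the audit functions and triggers.
select scriptpriority, scripttype, objectname
from meta.f_audit_build(3)
order by scriptpriority, objectname;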
examples/pgsql_meta_upgrade/audit/f_getauditinfo.sql (new file, 173 lines)
@@ -0,0 +1,173 @@
------------------------[CHANGE ID: 20171] 3/10/2016 10:11----------------
select public.dropall('f_getauditinfo','meta');
create or replace function meta.f_getauditinfo(
p_tablename text
)
returns table(
tablename citext
, tableprefix citext
, columnname citext
, columndescription citext
, columntype integer
, datatype citext
, specialtype integer
, foreigntable citext
, foreigncolumn citext
, foreignpk citext
, schemaname citext
)
language plpgsql stable
as
$$
--columntype:
-- 0=Normal Audited
--, 1=Primary Key
--, 2=Primary Key Audited
--, 3=UserColumn
--, 4=Normal Encrypted Audited
--, 5 = Parent Table Primary Key for lookup (Must give Foreign Table)
--, 6 = Parent Table Primary Key for delete (Must give Foreign Table)
--, 9=View only, used for custom audit. e.g. pplan
--specialtype:
-- 0,null = Leave alone.
-- 1=Convert int date to sql date.
-- 2=Convert int time to sql time.
-- 3=Status Field --Note: when SpecialType is 3 (Status Field), the ForeignTable column must have the name of the search sent to F_GetDescription_Code
-- 4=ForeignTable
-- 5=ForeignTable, tableprefix
-- 6 = Blob Column, will compare with size only
-- 7 = Blob Column, will compare with hash only
-- 8 = Hide
declare
--Error Handling--
m_funcname text = 'f_getauditinfo';
m_errmsg text;
m_errcontext text;
m_errdetail text;
m_errhint text;
m_errstate text;
--Error Handling--
m_tablename citext;
m_schemaname citext;
m_results record;
begin
if p_tablename ilike '%.%'
then
m_schemaname = split_part(p_tablename,'.',1);
m_tablename = split_part(p_tablename,'.',2);
else
m_schemaname = '';
m_tablename = p_tablename;
end if;

return query

with tbl as (
select t.table_name
,t.table_schema
from information_schema.tables t
where t.table_schema not ilike 'pg_%'
and t.table_schema not in ('information_schema','meta')
and t.table_type = 'BASE TABLE'
and t.table_name not in (
'atdetail','atevent','eventlog','event_notify','async_jobs','async_broker','async_broker_stat',
'version','upgrades','upgradescript','bt_enums','strmsession','strmsessionchunk',
'upg_options','upgrade_procrun','process_act_list','server_script','generalaudit',
'imapclient_instance','async_schedule','_loginsession','atview','authsession'
)
and t.table_name not ilike 'tmp\_%'
and (
m_tablename::citext = t.table_name::citext
or nv(m_tablename) = ''
)
and (
m_schemaname::citext = t.table_schema::citext
or nv(m_schemaname) = ''
)
) , cols as (
select tbl.*
,exists (select 1
from information_schema.table_constraints ct
inner join information_schema.constraint_column_usage cu on cu.constraint_name = ct.constraint_name
and cu.table_name = ct.table_name
and cu.column_name = c.column_name
where ct.table_schema = c.table_schema
and ct.constraint_type = 'PRIMARY KEY'
and ct.table_name = tbl.table_name
) as isPk
, exists (select 1
from information_schema.table_constraints ct
inner join information_schema.constraint_column_usage cu on cu.constraint_name = ct.constraint_name
and cu.table_name = ct.table_name
and cu.column_name = c.column_name
where ct.table_schema = c.table_schema
and ct.constraint_type = 'FOREIGN KEY'
and ct.table_name = tbl.table_name
) as isFk
,c.column_name::text as column_name
,(case when c.data_type = 'USER-DEFINED' then c.udt_name else c.data_type end)::text as data_type
,f_tableprefix(tbl.table_name::text) as tableprefix
,0::integer as specialtype
from tbl
inner join information_schema.columns c on c.table_name = tbl.table_name
--and c.data_type not in ('USER-DEFINED','ARRAY','geometry','geography') --,'bytea'
--and c.column_name not in ('createdby','createddate','updatedby','updateddate','deletedby','deleteddate')
and c.column_name not ilike '%dropped%'
and c.column_name not in ('prefix','updatecnt')
), processed as (
select *
--,(select from cols b where b.table_name = a.table_name)
from cols a
)
select distinct on (a.table_schema,a.table_name, a.column_name)
a.table_name::citext as tablename
,a.tableprefix::citext as tableprefix
,a.column_name::citext as columnname
,initcap(a.column_name)::citext as columndescription
,(case when a.isPk then 2 else 0 end)::integer as columntype
,a.data_type::citext as datatype
,(case when a.data_type ilike '%bytea%'
and nv(a.specialtype) not in (6,7,8) then 7
else a.specialtype
end)::integer as specialtype
,''::citext as foreigntable
,''::citext as foreigncolumn
,''::citext as foreignpk
,a.table_schema::citext as schemaname
from processed a
where exists (
select 1
from processed a2
where a2.table_name = a.table_name
and a2.table_schema = a.table_schema
and a2.isPk
)
and exists (
select 1
from processed a2
where a2.table_name = a.table_name
and a2.table_schema = a.table_schema
and a2.isPk
and (a2.data_type ilike '%int%' or a2.data_type ilike '%serial%')
)
order by a.table_name, a.column_name
;

exception
when others then
GET STACKED DIAGNOSTICS
m_errmsg = MESSAGE_TEXT
,m_errcontext = PG_EXCEPTION_CONTEXT
,m_errdetail = PG_EXCEPTION_DETAIL
,m_errhint = PG_EXCEPTION_HINT
,m_errstate = RETURNED_SQLSTATE;

m_errmsg := get_err_msg(m_funcname, m_errmsg, m_errcontext, m_errdetail, m_errhint, m_errstate);
raise '%',m_errmsg;

end;
$$;

---select * from meta.f_getauditinfo('syncprocess')
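Building on the usage comment above, a hypothetical call with a schema-qualified name (the function splits p_tablename on '.') shows how primary-key columns come back flagged as columntype 2:

select columnname, columntype, datatype, specialtype
from meta.f_getauditinfo('public.syncprocess')
order by columnname;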
examples/pgsql_meta_upgrade/get_table_fields.sql (new file, 90 lines)
@@ -0,0 +1,90 @@
--select * from dropall('meta.get_table_fields');
CREATE OR REPLACE FUNCTION meta.get_table_fields(
p_schema_name text
,p_table_name text
)
RETURNS JSONB
LANGUAGE plpgsql STABLE
cost 10
SECURITY DEFINER
AS
$$
DECLARE
a_primary_keys citext[];
a_foreign_keys citext[];
BEGIN

SELECT array_agg(kcu.column_name::citext)
FROM information_schema.table_constraints tc
JOIN information_schema.key_column_usage kcu
ON tc.constraint_name = kcu.constraint_name
AND tc.table_schema = kcu.table_schema
WHERE
tc.constraint_type = 'PRIMARY KEY'
AND tc.table_schema = p_schema_name::citext
AND tc.table_name = p_table_name::citext
INTO a_primary_keys
;

SELECT array_agg(kcu.column_name::citext)
FROM information_schema.table_constraints tc
JOIN information_schema.key_column_usage kcu
ON tc.constraint_name = kcu.constraint_name
AND tc.table_schema = kcu.table_schema
WHERE
tc.constraint_type = 'FOREIGN KEY'
AND tc.table_schema = p_schema_name::citext
AND tc.table_name = p_table_name::citext
INTO a_foreign_keys
;

return (
SELECT jsonb_object_agg(col.column_name::citext
, jsonb_build_object(
'type', case
when col.data_type::citext = 'USER-DEFINED' then col.udt_name::citext
else col.data_type::citext
end
, 'primary_key', case when col.column_name = any (a_primary_keys) then true else false end
, 'foreign_key', case when col.column_name = any (a_foreign_keys) then true else false end
,'relation', (
select to_jsonb(r)
from (
SELECT DISTINCT
c.table_schema AS parent_schema
, c.table_name AS parent_table
, cc.column_name AS parent_column
--, kcu.column_name AS relation_column

FROM information_schema.table_constraints tc
JOIN information_schema.constraint_column_usage ccu
ON tc.constraint_name = ccu.constraint_name
AND tc.table_schema = ccu.table_schema
JOIN information_schema.key_column_usage kcu
ON tc.constraint_name = kcu.constraint_name
AND tc.table_schema = kcu.table_schema
JOIN information_schema.referential_constraints rc
ON tc.constraint_name = rc.constraint_name
JOIN information_schema.constraint_column_usage cc
ON rc.unique_constraint_name = cc.constraint_name
JOIN information_schema.constraint_table_usage c
ON rc.unique_constraint_name = c.constraint_name
WHERE
tc.constraint_type = 'FOREIGN KEY'
AND tc.table_schema = p_schema_name::citext
AND tc.table_name = p_table_name::citext
and kcu.column_name = col.column_name
) r
)
)
)
FROM information_schema.columns col
WHERE
col.table_schema = p_schema_name::citext
AND col.table_name = p_table_name::citext
);

END;
$$;

--select meta.get_table_fields('core','masterprocess')
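Since the function returns a single JSONB object keyed by column name, a hypothetical follow-up (table name taken from the comment above) can unpack it into rows with jsonb_each:

select key as column_name,
       value->>'type' as data_type,
       (value->>'primary_key')::boolean as is_pk,
       value->'relation' as relation -- parent schema/table/column, or null
from jsonb_each(meta.get_table_fields('core','masterprocess'));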
examples/pgsql_meta_upgrade/migration_build.sql (new file, 1527 lines)
File diff suppressed because it is too large
examples/pgsql_meta_upgrade/migration_init.sql (new file, 369 lines)
@@ -0,0 +1,369 @@
select * from dropall('migration_init','meta');
CREATE OR REPLACE FUNCTION meta.migration_init(
p_version text default ''
,INOUT p_info jsonb default null
,OUT p_retval integer
,OUT p_errmsg text
)
LANGUAGE plpgsql VOLATILE
SECURITY DEFINER
AS
$$
DECLARE
m_except _except_type;

m_dblink_conn TEXT;
m_dblink_login TEXT;
m_pid integer;
m_result text;
m_gotemplate text;
m_qry text;
m_id_model integer;
BEGIN
m_except.func_name = 'migration_init';

p_retval = 0;
p_errmsg = '';
m_dblink_conn := 'dblink_migration_boot';
m_pid = pg_backend_pid();

perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',1,'objecttype',m_except.func_name, 'tm',clock_timestamp()::text)::text);

if p_info is null
then
p_info = jsonb_build_object();
end if;

select f.id_migration_model
from meta.migration_model f
where f.version = p_version
or coalesce(p_version,'') = ''
order by f.changedat desc
limit 1
into m_id_model;

if coalesce(m_id_model,0) = 0
then
raise exception 'Model/File does not exist for version %',p_version;
end if;

-- delete from meta.migration_script s
-- where s.rid_migration_model = m_id_model or s.rid_migration_model is null;
--
-- delete from meta.migration_relation s
-- where s.rid_migration_model = m_id_model or s.rid_migration_model is null;
--
-- delete from meta.migration_index s
-- where s.rid_migration_model = m_id_model or s.rid_migration_model is null;
--
-- delete from meta.migration_table s
-- where s.rid_migration_model = m_id_model or s.rid_migration_model is null;

m_gotemplate = $CQl$do $M$
declare
m_retval integer;
m_errmsg text;
m_info jsonb;
m_file bytea;
m_version text;
m_rid_model integer;
begin
m_version = $CQl$ || quote_literal(p_version) || $CQl$;
m_rid_model = $CQl$ ||m_id_model || $CQl$;
perform log_event($CQl$ || quote_literal(m_except.func_name) || $CQl$ ,'[message]',bt_enum('eventlog','upgrade'),1);

[proc]
end;
$M$;
$CQl$;

select 'host=127.0.0.1 dbname='||r.dbname||' port=' || r.port || ' user='|| r.loginuser ||' password=' || r.password
from f_get_local_conn('migration') r
into m_dblink_login;

if not exists (SELECT T.text from unnest(dblink_get_connections()) T where T.text = m_dblink_conn)
then
PERFORM dblink_connect(m_dblink_conn, m_dblink_login);
end if;

perform log_event(m_except.func_name,format('migrations init with connection %s',m_dblink_conn),bt_enum('eventlog','upgrade'));

PERFORM dblink_exec(m_dblink_conn, format($S$set application_name to 'MIGRATION:%s'; $S$, m_pid), true);

m_qry = replace(replace(m_gotemplate,'[message]','Reading migration models'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_read(m_rid_model) r
into m_retval, m_errmsg,m_info;

if m_retval > 0
then
raise exception 'Failed to read model: %', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);

-- raise notice 'Status - Reading migration models: % Qry:%', m_result , m_qry;

---Prime old upgrades

m_qry = replace(replace(m_gotemplate,'[message]','Prime old upgrades'),'[proc]',$CQl$
if exists (
select 1
from information_schema.tables t
where
t.table_name = 'upgrades'
and t.table_schema = 'public'
)
then
insert into public.upgrades(dbversion,status)
select m.version, 0
from meta.migration_model m
where m.id_migration_model = m_rid_model
and not exists (select 1 from public.upgrades s2 where s2.dbversion = m.version)
;

end if;

$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);

--inspect
m_qry = replace(replace(m_gotemplate,'[message]','Inspecting database'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_inspect(m_rid_model,$CQl$ || quote_literal(p_info::text) || $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;

if m_retval > 0
then
raise exception 'Failed to inspect database: %', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);

-- raise notice 'Status - Inspecting database: % Qry:%', m_result, m_qry;

--Drops
m_qry = replace(replace(m_gotemplate,'[message]','Build Differential (Drops)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_build(m_rid_model,'drop',$CQl$ || quote_literal(p_info::text)|| $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;

if m_retval > 0
then
raise exception 'Failed to build differential: % ', m_errmsg;
end if;

$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);

--Run Drops
m_qry = replace(replace(m_gotemplate,'[message]','Running Migration Scripts (Drops)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_run(m_rid_model,0,200,$CQl$ || quote_literal(p_info::text)|| $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;

if m_retval > 0
then
raise exception 'Failed to run differentials: %', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);

--Renames
m_qry = replace(replace(m_gotemplate,'[message]','Inspecting database (Renames)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_inspect(m_rid_model,$CQl$ || quote_literal(p_info::text) || $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;

if m_retval > 0
then
raise exception 'Failed to inspect database: %', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);

-- raise notice 'Status - Inspecting database: % Qry:%', m_result, m_qry;

--Build renames
m_qry = replace(replace(m_gotemplate,'[message]','Build Differential (Renames)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_build(m_rid_model,'rename',$CQl$ || quote_literal(p_info::text)|| $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;

if m_retval > 0
then
raise exception 'Failed to build differential: % ', m_errmsg;
end if;

$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);

--Functions
m_qry = replace(replace(m_gotemplate,'[message]','Build Differential (Functions)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_build(m_rid_model,'functions',$CQl$ || quote_literal(p_info::text)|| $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;

if m_retval > 0
then
raise exception 'Failed to build differential: % ', m_errmsg;
end if;

$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);

m_qry = replace(replace(m_gotemplate,'[message]','Update legacy'),'[proc]',$CQl$
if exists (
select 1
from information_schema.tables t
where
t.table_name = 'upgrades'
and t.table_schema = 'public'
)
then

update public.upgrades s
set totalscripts = r.total
,failedscripts = r.error
,scriptsdone = r.done
from (
select count(1) filter (where s.status <> 4) as total
, count(1) filter (where s.status = 3) as error
, count(1) filter (where s.status = 2 ) as done
from meta.migration_script s
where s.rid_migration_model = m_rid_model
) r
where s.dbversion in (select m.version
from meta.migration_model m
where m.id_migration_model = m_rid_model
);

end if;

$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);

--Run Functions
m_qry = replace(replace(m_gotemplate,'[message]','Running Migration Scripts (Functions)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_run(m_rid_model,0,200,$CQl$ || quote_literal(p_info::text)|| $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;

if m_retval > 0
then
raise exception 'Failed to run differentials: %', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);

---Rest
m_qry = replace(replace(m_gotemplate,'[message]','Inspecting database (2nd)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_inspect(m_rid_model,$CQl$ || quote_literal(p_info::text) || $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;

if m_retval > 0
then
raise exception 'Failed to inspect database: %', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);

-- Rest
m_qry = replace(replace(m_gotemplate,'[message]','Build Differential (All)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_build(m_rid_model,'all',$CQl$ || quote_literal(p_info::text)|| $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;

if m_retval > 0
then
raise exception 'Failed to build differential: % ', m_errmsg;
end if;

$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);

-- Run Rest
m_qry = replace(replace(m_gotemplate,'[message]','Running Migration Scripts (2nd)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_run(m_rid_model,0,10000,$CQl$ || quote_literal(p_info::text)|| $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;

if m_retval > 0
then
raise exception 'Failed to run differentials: %', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);

m_qry = replace(replace(m_gotemplate,'[message]','Update legacy'),'[proc]',$CQl$
if exists (
select 1
from information_schema.tables t
where
t.table_name = 'upgrades'
and t.table_schema = 'public'
)
then

update public.upgrades s
set totalscripts = r.total
,failedscripts = r.error
,scriptsdone = r.done
from (
select count(1) filter (where s.status <> 4) as total
, count(1) filter (where s.status = 3) as error
, count(1) filter (where s.status = 2 ) as done
from meta.migration_script s
where s.rid_migration_model = m_rid_model
) r
where s.dbversion in (select m.version
from meta.migration_model m
where m.id_migration_model = m_rid_model
);

end if;

$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);

--raise notice 'Status - Running Migration Scripts: % Qry:%', m_result, m_qry;

perform dblink_disconnect(m_dblink_conn);
perform log_event(m_except.func_name,'Migration process completed. Check the scripts status for errors',bt_enum('eventlog','upgrade'));

perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',2,'objecttype',m_except.func_name, 'tm',clock_timestamp()::text)::text);
EXCEPTION
WHEN others THEN
GET STACKED DIAGNOSTICS
m_except.message_text = MESSAGE_TEXT
, m_except.pg_exception_context = PG_EXCEPTION_CONTEXT
, m_except.pg_exception_detail = PG_EXCEPTION_DETAIL
, m_except.pg_exception_hint = PG_EXCEPTION_HINT
, m_except.returned_sqlstate = RETURNED_SQLSTATE
, m_except.schema_name = SCHEMA_NAME
, m_except.table_name = TABLE_NAME
, m_except.pg_datatype_name = PG_DATATYPE_NAME
, m_except.constraint_name = CONSTRAINT_NAME
, m_except.column_name = COLUMN_NAME
;

p_errmsg = _except(m_except);
p_retval = 1;
perform log_event(m_except.func_name,format('Migration error: %s',p_errmsg),bt_enum('eventlog','upgrade'));

if exists (SELECT T.text from unnest(dblink_get_connections()) T where T.text = m_dblink_conn)
then
perform dblink_disconnect(m_dblink_conn);
end if;

perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',3,'error',p_errmsg,'objecttype',m_except.func_name, 'tm',clock_timestamp()::text)::text);
END;
$$;
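A hypothetical end-to-end run (not in the commit): an empty p_version makes the function pick the most recently changed row in meta.migration_model, and because the exception handler traps errors, the outcome comes back in p_retval/p_errmsg rather than being raised to the caller:

-- p_retval = 0 on success, 1 on failure (p_errmsg holds the formatted error).
select p_retval, p_errmsg
from meta.migration_init('');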
examples/pgsql_meta_upgrade/migration_inspect.sql (new file, 445 lines)
@@ -0,0 +1,445 @@
|
||||
--select * from dropall('migration_inspect','meta');
|
||||
CREATE OR REPLACE FUNCTION meta.migration_inspect(
|
||||
p_id_migration_model integer default null
|
||||
,INOUT p_info jsonb default null
|
||||
,OUT p_retval integer
|
||||
,OUT p_errmsg text
|
||||
)
|
||||
LANGUAGE plpgsql VOLATILE
|
||||
SECURITY DEFINER
|
||||
AS
|
||||
$$
|
||||
DECLARE
|
||||
--Error Handling--
|
||||
m_funcname text = 'meta.migration_inspect';
|
||||
m_errmsg text;
|
||||
m_errcontext text;
|
||||
m_errdetail text;
|
||||
m_errhint text;
|
||||
m_errstate text;
|
||||
m_retval integer;
|
||||
--Error Handling--
|
||||
m_rowcnt integer;
|
||||
BEGIN
|
||||
p_retval = 0;
|
||||
p_errmsg = '';
|
||||
|
||||
--perform log_event(m_funcname,'init',bt_enum('eventlog','local notice'));
|
||||
|
||||
perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',1,'objecttype',m_funcname)::text);
|
||||
|
||||
delete from meta.migration_table
|
||||
where isdb;
|
||||
|
||||
insert into meta.migration_table(
|
||||
rid_migration_model
|
||||
,tablename
|
||||
,guid
|
||||
,schemaname
|
||||
,isdb
|
||||
,ismodel
|
||||
)
|
||||
select p_id_migration_model
|
||||
,lower(T.table_name)
|
||||
,obj_description(format('%s.%s',t.table_schema,t.table_name)::regclass, 'pg_class')
|
||||
,t.table_schema
|
||||
,true
|
||||
,false
|
||||
from INFORMATION_SCHEMA.tables t
|
||||
left outer join meta.f_upgrade_table(t.table_schema) tu on lower(tu.tablename) = lower(t.table_name)
|
||||
where t.table_type = 'BASE TABLE'
|
||||
and t.table_schema not ilike 'pg_%'
|
||||
and t.table_schema not ilike 'information_schema%'
|
||||
and tu.tablename is null
|
||||
and (
|
||||
t.table_schema in (
|
||||
select distinct mt.schemaname
|
||||
from meta.migration_table mt
|
||||
where mt.ismodel
|
||||
)
|
||||
or t.table_schema = 'public'
|
||||
)
|
||||
;
|
||||
|
||||
insert into meta.migration_column(rid_migration_table, columnname, columntype, columnlen, precision, prefix, defaultval, indextype, guid, seqseed, ispk,sequence)
|
||||
with det as (
|
||||
select
|
||||
obj_description(format('%s.%s', c.table_schema, c.table_name)::regclass, 'pg_class') as tbl_guid
|
||||
, lower(c.column_name) as colname
|
||||
, col_description(format('%s.%s', c.table_schema, c.table_name)::regclass, c.ordinal_position) as col_guid
|
||||
, case when c.data_type::citext = 'user-defined' then c.udt_name else c.data_type end
|
||||
|| (case
|
||||
when c.data_type::citext = 'numeric' then '(' || c.numeric_precision
|
||||
||
|
||||
(case when c.numeric_scale = 0 then '' else ',' || c.numeric_scale end)
|
||||
|| ')'
|
||||
else ''
|
||||
end) as coltype
|
||||
,'(' || c.numeric_precision
|
||||
||
|
||||
(case when c.numeric_scale = 0 then '' else ',' || c.numeric_scale end)
|
||||
|| ')' as numeric_precision_str
|
||||
, COALESCE(pc.constraint_len,null)::integer as constraint_len
|
||||
|
||||
,COALESCE(defval.expr,'') as defaultvalue
|
||||
,mt.id_migration_table
|
||||
,c.ordinal_position::int as pos
|
||||
,exists (
|
||||
select ct.constraint_name,ct.table_schema,ct.table_name, cu.column_name
|
||||
from information_schema.table_constraints ct
|
||||
inner join information_schema.constraint_column_usage cu on cu.constraint_name = ct.constraint_name
|
||||
and cu.table_name = ct.table_name
|
||||
and cu.table_schema = ct.table_schema
|
||||
and cu.table_schema not in ('pg_catalog','')
|
||||
where ct.constraint_type = 'PRIMARY KEY'
|
||||
and ct.table_schema = c.table_schema
|
||||
and ct.table_name = c.table_name
|
||||
and cu.column_name = c.column_name
|
||||
) as ispk
|
||||
from INFORMATION_SCHEMA.columns c
|
||||
inner join meta.migration_table mt on lower(mt.tablename) = lower(c.table_name)
|
||||
and lower(mt.schemaname) = lower(c.table_schema)
|
||||
and mt.isdb
|
||||
left join lateral (
|
||||
select pc1.conname
|
||||
,obj_description(pc1.oid, 'pg_constraint') as constraint_len
|
||||
from pg_constraint pc1
|
||||
where pc1.conname = 'chk_' || c.table_name || '_' || c.column_name
|
||||
limit 1
|
||||
) pc on true
|
||||
left join lateral (
|
||||
SELECT pg_get_expr(pad.adbin, pad.adrelid)::text as expr
|
||||
FROM pg_attrdef pad
|
||||
, pg_attribute pat
|
||||
, pg_class pc
|
||||
, pg_namespace ns
|
||||
WHERE
|
||||
pc.relname = c.table_name
|
||||
and pc.relnamespace = ns.oid
|
||||
and ns.nspname::text = c.table_schema
|
||||
AND pc.oid = pat.attrelid
|
||||
AND pat.attname = c.column_name
|
||||
AND pat.attrelid = pad.adrelid
|
||||
AND pat.attnum = pad.adnum
|
||||
) defval on true
|
||||
|
||||
)
|
||||
select det.id_migration_table
|
||||
,det.colname
|
||||
,det.coltype
|
||||
,det.constraint_len
|
||||
,det.numeric_precision_str
|
||||
,null --prefix
|
||||
,det.defaultvalue
|
||||
,'primary key'
|
||||
,det.col_guid
|
||||
,null::bigint as seed
|
||||
, det.ispk
|
||||
,det.pos
|
||||
from det
|
||||
;
|
||||
|
||||
----Indexes
with indexes as (
    select
        i1.id_migration_table
        , lower(i1.indexname) as indexname
        , i1.schemaname
        , i1.tablename
        , i1.indisprimary as ispk
        , not i1.indisunique as isduplicate
        , i1.indisunique or i1.indisprimary as isunique
        , i1.ikeyno + 1 as seq
        , i1.oid::text as guid
        , lower(a.attname) as colname
        , row_number() over (partition by i1.indexname order by a.attname) as rnidx
    from (
        select
            obj_description(format('%s.%s', ns.nspname, t.relname)::regclass, 'pg_class') tableident
            , t.oid
            , c.relname as indexname
            , i.indisprimary
            , i.indisunique as indisunique
            , i.indkey
            , unnest(i.indkey) as ikey
            , generate_subscripts(i.indkey, 1) as ikeyno
            , mt.id_migration_table
            , mt.tablename
            , mt.schemaname
        FROM pg_catalog.pg_class c
        inner join pg_catalog.pg_namespace n on n.oid = c.relnamespace
        inner join pg_catalog.pg_index i on i.indexrelid = c.oid
        inner join pg_catalog.pg_class t on t.oid = i.indrelid
        inner join pg_catalog.pg_namespace ns on ns.OID = t.relnamespace
        inner join meta.migration_table mt on lower(mt.tablename) = lower(t.relname)
            and lower(mt.schemaname) = lower(n.nspname::text)
            and mt.isdb
        left outer join information_schema.table_constraints cs on cs.constraint_type not in ('CHECK', 'FOREIGN KEY')
            and cs.constraint_name = c.relname
            and cs.table_name = t.relname
        WHERE c.relkind::citext = 'i'
    ) i1
    inner join pg_attribute a on a.attrelid = i1.oid
        and a.attnum = ikey
), insdex as (
    insert into meta.migration_index(rid_migration_model, rid_migration_table, indexname, ispk, isduplicate, sequence, guid, isdb, ismodel, isunique)
    select p_id_migration_model, r.id_migration_table, r.indexname, r.ispk, nv(r.isduplicate, false), r.seq, r.guid, true, false, nv(r.isunique, false)
    from indexes r
    where r.rnidx = 1
    returning id_migration_index, rid_migration_table, indexname, guid
), inscol as (
    insert into meta.migration_index_col(rid_migration_index, rid_migration_column_parent, options, sequence)
    select i.id_migration_index, mc.id_migration_column, null, r.seq
    from indexes r
    inner join insdex i on i.rid_migration_table = r.id_migration_table
        and i.indexname = r.indexname
    inner join meta.migration_column mc on mc.rid_migration_table = r.id_migration_table
        and lower(mc.columnname) = lower(r.colname)
    returning *
)
select count(1)
from inscol
into m_rowcnt
;

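-- Expand foreign keys from pg_constraint. Note: conkey/confkey are unnested via
-- cross join, which pairs every parent column with every child column; for
-- single-column FKs this is exact, while for composite FKs the rncnt/rpkeyno
-- dedupe keeps only the first pairing. unnest(conkey, confkey) WITH ORDINALITY
-- would pair positionally if composite keys need to be exact.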
----Relations
with cnt as (
    select t.relname as parent_tablename
        , tns.nspname as parent_schemaname
        , t1.relname as child_tablename
        , t1ns.nspname as child_schemaname
        , obj_description(format('%s.%s', tns.nspname, t.relname)::regclass, 'pg_class') as parent_guid
        , obj_description(format('%s.%s', t1ns.nspname, t1.relname)::regclass, 'pg_class') as child_guid
        , t.oid parent_oid
        , t1.oid child_oid
        , c.confupdtype
        , c.confdeltype
        , c.conname as constraint_name
        , par.key as parentcolumn
        , generate_subscripts(c.conkey, 1) rpkeyno
        , child.key as childcolumn
        , generate_subscripts(c.conkey, 1) rckeyno
    from pg_constraint c
    inner join pg_catalog.pg_class t on t.oid = c.conrelid
    inner join pg_catalog.pg_namespace tns on tns.oid = t.relnamespace
    inner join pg_catalog.pg_class t1 on t1.oid = c.confrelid
    inner join pg_catalog.pg_namespace t1ns on t1ns.oid = t1.relnamespace
    cross join unnest(c.conkey) par(key)
    cross join unnest(c.confkey) child(key)
    where c.contype = 'f'
), det as (
    select
        r1.parent_guid
        , lower(r1.constraint_name) as constraint_name
        , r1.child_guid
        , (case r1.confdeltype::citext
            when 'r' then 'restrict_server' --restrict server
            when 'c' then 'cascade_server' --cascade server
            when 'n' then 'set null'
            when 'd' then 'set default'
            else 'no_action'
          end) as deleteconstraint
        , (case r1.confupdtype::citext
            when 'r' then 'restrict_server' --restrict server
            when 'c' then 'cascade_server' --cascade server
            when 'n' then 'set null'
            when 'd' then 'set default'
            else 'no_action'
          end) as updateconstraint
        , lower(ap.attname) as parent_column
        , lower(ac.attname) as child_column
        , r1.rpkeyno
        , row_number() over (partition by constraint_name order by ap.attname, ac.attname) as rncnt
        , r1.parent_tablename
        , r1.parent_schemaname
        , r1.child_tablename
        , r1.child_schemaname
        , mtp.id_migration_table as id_migration_table_parent
        , mtc.id_migration_table as id_migration_table_child
    from cnt r1
    inner join pg_attribute ap on ap.attrelid = r1.parent_oid
        and ap.attnum = parentcolumn
    inner join pg_attribute ac on ac.attrelid = r1.child_oid
        and ac.attnum = childcolumn
    inner join meta.migration_table mtp on mtp.tablename = r1.parent_tablename
        and mtp.schemaname = r1.parent_schemaname
        and mtp.isdb
    inner join meta.migration_table mtc on mtc.tablename = r1.child_tablename
        and mtc.schemaname = r1.child_schemaname
        and mtc.isdb
    order by
        r1.child_tablename
        , r1.constraint_name
        , r1.rpkeyno
        , r1.rckeyno
), ins as (
    insert into meta.migration_relation(rid_migration_model, rid_migration_table_parent, rid_migration_table_child, relationname, updateconstraint
        , deleteconstraint, sequence, isdb, ismodel
    )
    select p_id_migration_model, det.id_migration_table_parent, det.id_migration_table_child, det.constraint_name, det.updateconstraint, det.deleteconstraint, null
        , true, false
    from det
    where det.rncnt = 1
    returning *
), inscol as (
    insert into meta.migration_relation_col(rid_migration_relation, rid_migration_column_parent, rid_migration_column_child, sequence)
    select ins.id_migration_relation, colp.id_migration_column, colc.id_migration_column
        , det.rpkeyno
    from det
    inner join ins on ins.relationname = det.constraint_name
        and ins.rid_migration_table_parent = det.id_migration_table_parent
        and ins.rid_migration_table_child = det.id_migration_table_child
    inner join meta.migration_column colc on colc.rid_migration_table = ins.rid_migration_table_child
        and colc.columnname = det.child_column
    inner join meta.migration_column colp on colp.rid_migration_table = ins.rid_migration_table_parent
        and colp.columnname = det.parent_column
    returning *
)
select count(1)
from inscol
into m_rowcnt
;

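-- Record the remaining database objects (triggers, sequences, check
-- constraints, views, functions) as migration_object rows so the model
-- diff can account for them.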
---Load all triggers
insert into meta.migration_object(rid_migration_model, rid_migration_table, objecttype, objectname, schema, ismodel, isdb)
select distinct on (mt.id_migration_table, tr.trigger_name)
    p_id_migration_model
    , mt.id_migration_table
    , 'trigger'
    , tr.trigger_name
    , mt.schemaname
    , false
    , true
from INFORMATION_SCHEMA.triggers tr
inner join meta.migration_table mt on mt.isdb
    and lower(mt.schemaname) = lower(tr.event_object_schema)
    and lower(mt.tablename) = lower(tr.event_object_table)
;

---Load all sequences
insert into meta.migration_object(rid_migration_model, objecttype, rid_migration_table, objectname, schema, ismodel, isdb)
select distinct on (seq.sequence_name, seq.schemaname, mt.id_migration_table)
    p_id_migration_model
    , 'sequence'
    , mt.id_migration_table
    , seq.sequence_name
    , seq.schemaname
    , false
    , true
from (
    SELECT seqclass.relname AS sequence_name
        , ns.nspname as schemaname
        , depclass.relname AS table_name
        , (
            select array_agg(attrib.attname)
            from pg_attribute attrib
            where attrib.attnum = dep.refobjsubid
                AND attrib.attrelid = dep.refobjid
        ) as cols
    FROM pg_class AS seqclass
    join pg_namespace ns on ns.oid = seqclass.relnamespace
        and lower(ns.nspname::text) in (
            select distinct mt.schemaname
            from meta.migration_table mt
            where mt.isdb
        )
    JOIN pg_sequence AS seq
        ON (seq.seqrelid = seqclass.relfilenode)
    left outer JOIN pg_depend AS dep
        ON (seq.seqrelid = dep.objid)
    left outer JOIN pg_class AS depclass
        ON (dep.refobjid = depclass.relfilenode)
) seq
left outer join meta.migration_table mt on mt.isdb
    and lower(mt.schemaname) = lower(seq.schemaname)
    and lower(mt.tablename) = lower(seq.table_name)
;

insert into meta.migration_object(rid_migration_model, objecttype, rid_migration_column, rid_migration_table, objectname, schema, ismodel, isdb, body)
SELECT distinct on (col.id_migration_column, tbl.id_migration_table)
    p_id_migration_model
    , 'check_constraint'
    , col.id_migration_column
    , tbl.id_migration_table
    , conname AS constraint_name
    , ns.nspname AS schema
    , false
    , true
    , pg_get_constraintdef(c.oid) AS constraint_definition
FROM pg_constraint c
JOIN pg_attribute a ON a.attrelid = c.conrelid
    AND a.attnum = ANY(c.conkey)
inner join pg_class tc on tc.oid = c.conrelid
inner join pg_namespace ns on ns.oid = tc.relnamespace
inner join meta.migration_table tbl on tbl.isdb
    and lower(tbl.schemaname) = lower(ns.nspname)
    and lower(tbl.tablename) = lower(tc.relname)
inner join meta.migration_column col on col.rid_migration_table = tbl.id_migration_table
    and lower(col.columnname) = lower(a.attname)
WHERE contype = 'c';

---Views on a table
insert into meta.migration_object(rid_migration_model, rid_migration_table, objecttype, objectname, schema, ismodel, isdb)
select distinct on (v.oid, mt.id_migration_table)
    p_id_migration_model
    , mt.id_migration_table
    , 'view'
    , v.oid::regclass AS view
    , mt.schemaname
    , false
    , true
FROM pg_depend AS d -- objects that depend on the table
JOIN pg_rewrite AS r -- rules depending on the table
    ON r.oid = d.objid
JOIN pg_class AS v -- views for the rules
    ON v.oid = r.ev_class
inner join pg_class as tbl on tbl.oid = d.refobjid
inner join pg_namespace ns on ns.oid = tbl.relnamespace
inner join meta.migration_table mt on mt.isdb
    and lower(mt.schemaname) = lower(ns.nspname)
    and lower(mt.tablename) = lower(tbl.relname)
WHERE v.relkind = 'v' -- only interested in views
    AND d.classid = 'pg_rewrite'::regclass
    AND d.refclassid = 'pg_class'::regclass
    AND d.deptype = 'n' -- normal dependency
;

--All functions
insert into meta.migration_object(rid_migration_model, objectname, objecttype, schema, body, ismodel, isdb)
SELECT distinct
    p_id_migration_model
    , pp.proname || format('(%s)', pg_get_function_identity_arguments(oid)) --function name
    , 'function'
    , pp.pronamespace::regnamespace::text AS schema --schema the function lives in
    , null
    , false
    , true
    -- pg_get_functiondef(oid), --function definition
    -- pg_get_function_arguments(oid), --arguments (including default values)
    -- pg_get_function_identity_arguments(oid), --this form omits default values
    -- pg_get_function_result(oid), --reconstructs the RETURNS clause of a function
FROM pg_proc pp
WHERE pp.pronamespace::regnamespace::text not in ('pg_catalog', 'information_schema')
    and pp.pronamespace::regnamespace::text not ilike 'pg_%'
;

perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',2,'objecttype',m_funcname)::text);

EXCEPTION
    WHEN others THEN
        GET STACKED DIAGNOSTICS
            m_errmsg = MESSAGE_TEXT
            ,m_errcontext = PG_EXCEPTION_CONTEXT
            ,m_errdetail = PG_EXCEPTION_DETAIL
            ,m_errhint = PG_EXCEPTION_HINT
            ,m_errstate = RETURNED_SQLSTATE;

        p_retval = 1;
        p_errmsg = format('%s Context %s State: %s', m_errmsg, m_errcontext, m_errstate);
        perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',3,'error',m_errmsg,'objecttype',m_funcname)::text);
END;
$$;

773
examples/pgsql_meta_upgrade/migration_read.sql
Normal file
@@ -0,0 +1,773 @@
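-- migration_read: loads the newest (or given) model file from meta.migration_model,
-- decompresses it if needed, detects XML vs JSON, and rebuilds the model-side rows
-- in meta.migration_table/_column/_index/_relation before normalizing types.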
--select * from dropall('migration_read','meta');
CREATE OR REPLACE FUNCTION meta.migration_read(
    p_id_migration_model integer default null
    ,OUT p_retval integer
    ,OUT p_errmsg text
    ,OUT p_info jsonb
)
LANGUAGE plpgsql VOLATILE
SECURITY DEFINER
AS
$$
DECLARE
    m_funcname text = 'migration_read';
    m_errmsg text;
    m_errcontext text;
    m_errdetail text;
    m_errhint text;
    m_errstate text;
    m_retval integer;
    m_id_migration_model integer;
    m_payload text;
    m_tm timestamp;

    m_xml xml;
    m_json json;
    j_err jsonb;
BEGIN
    m_tm = clock_timestamp();

    select f.id_migration_model
        ,convert_from(case when f_iscompressed(f.modelfile) = 'gzip' then pl_gzip_bytes(0, f.modelfile) else f.modelfile end, 'utf8')
    from meta.migration_model f
    where f.id_migration_model = p_id_migration_model
        or p_id_migration_model is null
    order by f.version desc, f.id_migration_model desc
    into m_id_migration_model, m_payload;

    if m_payload ilike '%<%>%'
    then
        m_xml = m_payload::xml;
        raise notice 'XML file set';
    elseif m_payload ilike '%{%}%'
    then
        raise notice 'JSON file set';
        m_json = m_payload::json;
    end if;

    if m_xml is not null
    then

        perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',1,'objecttype',m_funcname)::text);

        p_info = jsonb_build_object('format','xml');

        delete from meta.migration_table
        where ismodel;

        delete from meta.migration_index
        where ismodel;

        delete from meta.migration_relation_col d
        where d.rid_migration_relation in (
            select r.id_migration_relation
            from meta.migration_relation r
            where r.ismodel
        );

        delete from meta.migration_relation
        where ismodel;

        raise notice 'inserting meta.migration_table @ %', (clock_timestamp() - m_tm)::interval;
        m_tm = clock_timestamp();

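        -- The model XML is shaped roughly like the following (illustrative sketch,
        -- reconstructed from the xpath expressions used below):
        -- <root>
        --   <tables>
        --     <table seq="0">
        --       <tableident>...</tableident><tablename>...</tablename><schema>...</schema>
        --       <columns><column><columnname>...</columnname><columntype>...</columntype></column></columns>
        --       <indexes><index><indexname>...</indexname><indexcolumns><indexcolumn seq="1">col</indexcolumn></indexcolumns></index></indexes>
        --       <relations><relation><childtable>...</childtable><keyfields seq="1"><parentcolumn>...</parentcolumn><childcolumn>...</childcolumn></keyfields></relation></relations>
        --     </table>
        --   </tables>
        --   <scripts><script>...</script></scripts>
        -- </root>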
        drop table if exists tmp_meta_migration_table;
        create temp table tmp_meta_migration_table as
        select
            lower((xpath('/table/tableident/text()', node.x))[1]::text)::citext as tableident
            , lower((xpath('/table/prefix/text()', node.x))[1]::text)::citext as tableprefix
            , lower((xpath('/table/tablename/text()', node.x))[1]::text)::citext as tablename
            , lower(coalesce((xpath('/table/schema/text()', node.x))[1]::text, (xpath('/table/schemaname/text()', node.x))[1]::text)) as schemaname
            , lower((xpath('/table/version/text()', node.x))[1]::text)::citext as version
            , coalesce((xpath('/table/@seq', node.x))[1]::text, '0')::integer + 1 as schemapriority
            , node.x as xml
        from unnest(xpath('/root/tables/table', m_xml)) node(x)
        ;

        insert into meta.migration_table(rid_migration_model, guid, prefix, tablename, schemaname, version, ismodel, isdb, schemapriority)
        select distinct on (r.tableident, r.tablename, r.tableprefix, r.schemaname)
            m_id_migration_model
            , r.tableident
            , r.tableprefix
            , r.tablename
            , r.schemaname
            , r.version
            , true
            , false
            , r.schemapriority
        from tmp_meta_migration_table r
        ;

        raise notice 'inserting meta.migration_column @ %', (clock_timestamp() - m_tm)::interval;
        m_tm = clock_timestamp();

        insert into meta.migration_column(rid_migration_table, columnname, guid, columntype, indextype, seqseed, columnlen, precision, defaultval)
        select distinct on (r.id_migration_table, r.columnname, r.columnident)
            r.id_migration_table, r.columnname, r.columnident, r.columntype, r.indextype, r.seq_seed, r.columnlen, r.precision
            , r.defaultval
        from (
            select
                mt.id_migration_table
                , lower((xpath('/column/columnname/text()', col.x))[1]::text) as columnname
                , (xpath('/column/columnident/text()', col.x))[1]::text as columnident
                , (xpath('/column/columntype/text()', col.x))[1]::text as columntype
                , (xpath('/column/indextype/text()', col.x))[1]::text as indextype
                , coalesce((xpath('/column/seed/text()', col.x))[1]::text, '0')::bigint as seq_seed
                , coalesce((xpath('/column/columnlen/text()', col.x))[1]::text, '0')::integer as columnlen
                , coalesce((xpath('/column/precision/text()', col.x))[1]::text, '')::text as precision
                , coalesce((xpath('/column/defaultval/text()', col.x))[1]::text, '')::text as defaultval
            from tmp_meta_migration_table tbl
            cross join unnest(xpath('/table/columns/column', tbl.xml)) col(x)
            inner join meta.migration_table mt on mt.ismodel
                and mt.rid_migration_model = m_id_migration_model
                -- and lower(mt.guid) = lower((xpath('/table/tableident/text()', tbl.x))[1]::text)
                and lower(mt.tablename) = lower(tbl.tablename)
                and lower(mt.schemaname) = lower(tbl.schemaname)
        ) r
        ;

        raise notice 'inserting meta.migration_index @ %', (clock_timestamp() - m_tm)::interval;
        m_tm = clock_timestamp();

        insert into meta.migration_index(rid_migration_model, rid_migration_table, indexname, indextype, ispk, isduplicate, ispartial, partialstr, isunique, sequence, guid, ismodel)
        select distinct on (r.id_migration_table, r.indexname)
            m_id_migration_model
            , r.id_migration_table
            , r.indexname
            , null
            , r.indexprimary in ('1','true')
            , r.indexduplicate in ('1','true')
            , length(r.indexpartial) > 3
            , r.indexpartial
            , (not coalesce(r.indexduplicate, '') in ('1','true') and (r.indexunique in ('1','true') or r.indexprimary in ('1','true')))
            , row_number() over (order by r.indexname)
            , r.indexname
            , true
        from (
            select
                mt.id_migration_table
                , mt.schemaname
                , mt.tablename
                , lower((xpath('/index/indexname/text()', idx.x))[1]::text) as indexname
                , coalesce(lower((xpath('/index/indexprimary/text()', idx.x))[1]::text), '') as indexprimary
                , coalesce(lower((xpath('/index/indexduplicate/text()', idx.x))[1]::text), '') as indexduplicate
                , coalesce(lower((xpath('/index/indexpartial/text()', idx.x))[1]::text), '') as indexpartial
                , coalesce(lower((xpath('/index/indexunique/text()', idx.x))[1]::text), '') as indexunique
            from tmp_meta_migration_table tbl
            cross join unnest(xpath('/table/indexes/index', tbl.xml)) idx(x)
            inner join meta.migration_table mt on mt.ismodel
                and mt.rid_migration_model = m_id_migration_model
                and lower(mt.tablename) = lower(tbl.tablename)
                and lower(mt.schemaname) = lower(tbl.schemaname)
        ) r
        ;

        update meta.migration_index u
        set isunique = true
        where u.indexname ilike 'uk_%'
            and not u.ispk
            and not u.ispartial
            and not u.isduplicate
        ;

        -- update meta.migration_index u
        -- set ispartial = true
        -- where u.indexname ilike 'k_%'
        -- and not u.isunique
        -- and not u.ispk
        -- ;

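        -- Map each <indexcolumn> back to its migration_column row; the @seq
        -- attribute preserves the key order inside the index.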
        insert into meta.migration_index_col(rid_migration_index, rid_migration_column_parent, sequence)
        select distinct on (r.id_migration_index, r.id_migration_column)
            r.id_migration_index
            , r.id_migration_column
            , r.seq
        from (
            select
                midx.id_migration_index
                , mc.id_migration_column
                , coalesce((xpath('/indexcolumn/@seq', idxcol.x))[1]::text, '0')::integer as seq
            from tmp_meta_migration_table tbl
            cross join unnest(xpath('/table/indexes/index', tbl.xml)) idx(x)
            inner join meta.migration_table mt on mt.ismodel
                and mt.rid_migration_model = m_id_migration_model
                and lower(mt.tablename) = lower(tbl.tablename)
                and lower(mt.schemaname) = lower(tbl.schemaname)
            inner join meta.migration_index midx on midx.rid_migration_table = mt.id_migration_table
                and midx.indexname = lower((xpath('/index/indexname/text()', idx.x))[1]::text)
            cross join unnest(xpath('/index/indexcolumns/indexcolumn', idx.x)) idxcol(x)
            inner join meta.migration_column mc on mt.id_migration_table = mc.rid_migration_table
                and lower(mc.columnname) = lower((xpath('/indexcolumn/text()', idxcol.x))[1]::text)
        ) r
        ;

        raise notice 'inserting meta.migration_relation temp table @ %', (clock_timestamp() - m_tm)::interval;
        m_tm = clock_timestamp();

        drop table if exists tmp_meta_migration_relation;
        create temp table tmp_meta_migration_relation as
        select
            tbl.tablename
            , tbl.schemaname
            , lower((xpath('/relation/childtable/text()', rel.x))[1]::text) as childtable
            , lower((xpath('/relation/relationname/text()', rel.x))[1]::text) as relationname
            , lower((xpath('/relation/relationguid/text()', rel.x))[1]::text) as relationguid
            , lower((xpath('/relation/deleteconstraint/text()', rel.x))[1]::text) as deleteconstraint
            , lower((xpath('/relation/updateconstraint/text()', rel.x))[1]::text) as updateconstraint
            , rel.x as relation
        from tmp_meta_migration_table tbl
        cross join unnest(xpath('/table/relations/relation', tbl.xml)) rel(x)
        ;

        raise notice 'inserting meta.migration_relation insert @ %', (clock_timestamp() - m_tm)::interval;
        m_tm = clock_timestamp();

        insert
        into meta.migration_relation( rid_migration_model
            , rid_migration_table_parent
            , rid_migration_table_child
            , relationname
            , guid
            , updateconstraint
            , deleteconstraint
            , sequence
            , ismodel)
        select m_id_migration_model
            , mt.id_migration_table
            , ct.id_migration_table as rid_child_table
            , src.relationname
            , src.relationguid
            , src.updateconstraint
            , src.deleteconstraint
            , row_number() over (order by src.relationname)
            , true
        from tmp_meta_migration_relation as src
        inner join meta.migration_table mt on mt.ismodel
            and mt.rid_migration_model = m_id_migration_model
            and lower(mt.tablename) = lower(src.tablename)
            and lower(mt.schemaname) = lower(src.schemaname)
        inner join meta.migration_table ct on ct.ismodel
            and ct.rid_migration_model = m_id_migration_model
            and lower(ct.schemaname) = lower(mt.schemaname)
            and lower(ct.tablename) = lower(src.childtable)
        ;

        raise notice 'inserting meta.migration_relation_col @ %', (clock_timestamp() - m_tm)::interval;
        m_tm = clock_timestamp();

        insert into meta.migration_relation_col(rid_migration_relation, rid_migration_column_parent, rid_migration_column_child, sequence)
        select mrel.id_migration_relation
            , parcol.id_migration_column
            , cldcol.id_migration_column
            , coalesce((xpath('/keyfields/@seq', key.x))[1]::text, '0')::integer as seq
        from tmp_meta_migration_relation src
        inner join meta.migration_table mt on mt.ismodel
            and mt.rid_migration_model = m_id_migration_model
            and lower(mt.tablename) = lower(src.tablename)
            and lower(mt.schemaname) = lower(src.schemaname)
        inner join meta.migration_relation mrel on mrel.rid_migration_table_parent = mt.id_migration_table
            and lower(mrel.relationname) = lower(src.relationname)
        cross join unnest(xpath('/relation/keyfields', src.relation)) key(x)
        inner join meta.migration_column parcol on mrel.rid_migration_table_parent = parcol.rid_migration_table
            and lower(parcol.columnname) = lower((xpath('/keyfields/parentcolumn/text()', key.x))[1]::text)
        inner join meta.migration_column cldcol on mrel.rid_migration_table_child = cldcol.rid_migration_table
            and lower(cldcol.columnname) = lower((xpath('/keyfields/childcolumn/text()', key.x))[1]::text)
        ;

        raise notice 'inserting meta.migration_object @ %', (clock_timestamp() - m_tm)::interval;
        m_tm = clock_timestamp();

        delete from meta.migration_object
        where ismodel;

        insert into meta.migration_object(rid_migration_model, objecttype, objectname, schema, version, checksum, sequence, priority, guid, body, ismodel, isdb)
        select m_id_migration_model
            , format('script:%s', r.scripttype)
            , format('%s [%s]', r.scriptname, r.id_scriptcode)
            , r.dbschema, r.version
            , case when coalesce(r.scriptchecksum, '') <> '' then r.scriptchecksum else encode(sha256(convert_to(r.scriptcode, 'utf8')), 'hex') end
            , r.sequence, r.priority, r.id_scriptcode::text, r.scriptcode, true, false
        from (
            select (xpath('/script/priority/text()', node.x))[1]::text::integer as priority
                , (xpath('/script/sequence/text()', node.x))[1]::text::integer as sequence
                , lower((xpath('/script/scriptname/text()', node.x))[1]::text)::citext as scriptname
                , lower((xpath('/script/scripttype/text()', node.x))[1]::text)::citext as scripttype
                , lower((xpath('/script/dbschema/text()', node.x))[1]::text)::citext as dbschema
                , lower((xpath('/script/programname/text()', node.x))[1]::text)::citext as programname
                , lower((xpath('/script/version/text()', node.x))[1]::text)::citext as version
                , lower((xpath('/script/scriptchecksum/text()', node.x))[1]::text)::citext as scriptchecksum
                , xml_extract_value('/script/code', node.x)::text as scriptcode
                , 'dct' as source
                , (xpath('/script/id_scriptcode/text()', node.x))[1]::text::integer as id_scriptcode
            from unnest(xpath('/root/scripts/script', m_xml)) node(x)
        ) r
        ;

    elsif m_json is not null
    then
        p_info = jsonb_build_object('format','json');
        raise exception 'Not yet supported';
    else
        p_info = jsonb_build_object('format','unknown');
        raise exception 'Unsupported input file, Content: %', substr(m_payload, 1, 20);
    end if;

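    -- Normalization passes: inject the updatecnt column where missing, derive pk
    -- flags from indexes, parse lengths/precision out of the declared types, apply
    -- option-driven overrides, and map program types to PostgreSQL types.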
    insert into meta.migration_column(rid_migration_table, columnname, columntype, guid)
    select distinct on (t.id_migration_table)
        t.id_migration_table, 'updatecnt', 'integer', format('updatecnt_%s', t.id_migration_table)
    from meta.migration_table t
    left outer join meta.migration_column c on c.rid_migration_table = t.id_migration_table
        and c.columnname = 'updatecnt'
    where t.ismodel
        and t.tablename not in (
            select uut.tablename
            from meta.f_upgrade_table() uut
        )
        and c.id_migration_column is null
    ;

    raise notice 'updates section @ %', (clock_timestamp() - m_tm)::interval;
    m_tm = clock_timestamp();

    --Set primary key field from indexes
    update meta.migration_column u
    set ispk = true
    from meta.migration_column c
    inner join meta.migration_table t on t.id_migration_table = c.rid_migration_table
        and t.ismodel
    inner join meta.migration_index idx on idx.rid_migration_table = t.id_migration_table
    inner join meta.migration_index_col idxcol on idxcol.rid_migration_index = idx.id_migration_index
        and idxcol.rid_migration_column_parent = c.id_migration_column
    where idx.ispk
        and u.id_migration_column = c.id_migration_column
    ;

    --Set length for strings
    update meta.migration_column
    set columnlen = reverse(substr(reverse(columntype), 2, strpos(reverse(columntype), '(') - 2))::integer
        - (case when columntype ilike '%cstring%' then 1 else 0 end)
    where rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
        and columntype ilike '%string%'
    ;

    ---Set the length and precision values
    update meta.migration_column u
    set columnlen = case when coalesce(u.columnlen, 0) = 0 and (r.precision > 0 or r.scale > 0)
            then coalesce(r.precision, 0) + coalesce(r.scale, 0)
            else u.columnlen
        end
        , precision = format('%s,%s', r.precision, r.scale)
    from (
        select id_migration_column
            , substring(columntype FROM '\((\d+),(\d+)\)')::integer AS precision
            , substring(columntype FROM '\(\d+,(\d+)\)')::integer AS scale
        from meta.migration_column
        where rid_migration_table in (
                select t.id_migration_table from meta.migration_table t where t.ismodel
            )
            and columntype ilike '%(%)'
    ) r
    where u.id_migration_column = r.id_migration_column
    ;

    --Set default values for prefixes
    update meta.migration_column u
    set defaultval = (
            select quote_literal(upper(t.prefix))
            from meta.migration_table t
            where t.id_migration_table = u.rid_migration_table
                and t.ismodel
                and nv(t.prefix) <> ''
        )
    where u.rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
        and u.columnname = 'prefix'
        and coalesce(u.defaultval, '') = ''
    ;

    --Set GUID field types, e.g. text or uuid
    with option as (
        select name, value
        from meta.migration_option
        where name = 'guidtype'
    )
    update meta.migration_column u
    set columntype = option.value
        , defaultval = case when nv(u.defaultval) = '' then 'newid()' else u.defaultval end
    from option
    where rid_migration_table in (
            select t.id_migration_table
            from meta.migration_table t
            where t.ismodel
                and not exists (
                    select 1
                    from meta.migration_option o
                    cross join regexp_split_to_table(o.value, ',') rex(v)
                    where o.name = 'textguid'
                        and rex.v::citext = t.tablename
                        and t.schemaname = 'public'
                )
        )
        and (u.columnname in ('guid','uuid')
            or u.columnname ilike 'guid%'
        )
        and u.columntype is distinct from option.value
    ;

    --Limit length constraints
    with option as (
        select name, value::numeric as numvalue
        from meta.migration_option
        where name = 'max_constraint'
            and value ~ '^-?[0-9]+(\.[0-9]+)?$'
    )
    update meta.migration_column u
    set columnlen = 0
    from option
    where rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
        and u.columnlen >= option.numvalue
    ;

    --Force jsonb for configured column names
    with option as (
        select name, value
        from meta.migration_option
        where name = 'settype_names_jsonb'
            and value is not null
    )
    update meta.migration_column u
    set columntype = 'jsonb'
    from option
    where rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
        and lower(u.columnname) in (
            SELECT lower(t.v) from regexp_split_to_table(option.value, ',') t(v)
        )
    ;

    --Convert the program types to postgres types
    update meta.migration_column u
    set columntype = meta.f_datatype_map(u.columntype)
    where rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
        and meta.f_datatype_map(u.columntype) not ilike '%unknown%'
    ;

    update meta.migration_column u
    set columntype = case when u.columntype in ('date','text','citext') and (u.columnname ilike '%datetime%' or u.columnname ilike '%timestamp%')
            then 'timestamp'
            else u.columntype
        end
    where rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
        and meta.f_datatype_map(u.columntype) not ilike '%unknown%'
    ;

    --Large objects have no length
    update meta.migration_column u
    set columnlen = 0
    where rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
        and lower(u.columntype) in ('blob,0','jsonb','json','blob','bytea')
    ;

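    -- Validate model identifiers against PostgreSQL reserved keywords; any hits
    -- are reported back to the caller via p_info rather than failing the load.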
    j_err = null;
    select jsonb_agg(
            jsonb_build_object('tablename', t.tablename, 'schemaname', t.schemaname, 'word', kw.word)
        )
    from meta.migration_table t
    inner join pg_get_keywords() kw on lower(kw.word) = lower(t.tablename)
        and lower(kw.catdesc::text) = 'reserved'
    where t.ismodel
    into j_err
    ;

    if jsonb_typeof(j_err) = 'array'
    then
        p_info = p_info || jsonb_build_object('table_reserved_words', j_err);
    end if;

    j_err = null;
    select jsonb_agg(
            jsonb_build_object('columnname', u.columnname, 'tablename', t.tablename, 'schemaname', t.schemaname, 'word', kw.word)
        )
    from meta.migration_column u
    inner join meta.migration_table t on t.id_migration_table = u.rid_migration_table
    inner join pg_get_keywords() kw on lower(kw.word) = lower(u.columnname)
        and lower(kw.catdesc::text) = 'reserved'
    where u.rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
    into j_err
    ;

    if jsonb_typeof(j_err) = 'array'
    then
        p_info = p_info || jsonb_build_object('column_reserved_words', j_err);
    end if;

    ----Set the default value to the identity if the pk type is identity
    update meta.migration_column u
    set defaultval = r.def
    from (
        select
            c.id_migration_column
            , format($S$nextval('%s.identity_%s_%s'::regclass)$S$, t.schemaname, t.tablename, c.columnname) as def
        from meta.migration_table t
        inner join meta.migration_column c on c.rid_migration_table = t.id_migration_table
        where t.ismodel
            and c.ispk
            and nv(c.defaultval) = ''
            and c.indextype = 'identity'
    ) r
    where u.id_migration_column = r.id_migration_column;

    update meta.migration_table u
    set ismodel = false
    where format('%s_%s', u.schemaname, u.tablename) in (
            select format('%s_%s', split_part(o.name, ':', 2)
                , t.name
            )
            from meta.migration_option o
            cross join regexp_split_to_table(o.value, ',') t(name)
            where o.name ilike 'exclude:%'
                and t.name <> ''
        )
        and u.ismodel
    ;

    raise notice 'duplicates section @ %', (clock_timestamp() - m_tm)::interval;
    m_tm = clock_timestamp();

    --Move duplicate tables that exist in the settings to their default location. Move the keys as well.
    with move as (
        select
            t1.schemaname
            , t1.tablename
            , t1.id_migration_table as id_migration_table_dest
            , t2.schemaname as schemaname_src
            , t2.tablename as tablename_src
            , t2.id_migration_table as id_migration_table_src
            , format('%s.%s', t1.schemaname, t1.tablename) as named
        from meta.migration_table t1
        inner join meta.migration_table t2 on t2.ismodel
            and t2.tablename = t1.tablename
            and t2.schemaname is distinct from t1.schemaname
        where t1.ismodel
            --and not t1.isdb
            and (
                format('%s.%s', t1.schemaname, t1.tablename) in (
                    select format('%s.%s', o.value, lower(split_part(o.name, ':', 2)))
                    from meta.migration_option o
                    where o.name ilike 'default_schema:%'
                )
            )
    ), cols as (
        update meta.migration_column u
        set rid_migration_table = m.id_migration_table_dest
        from move m
        inner join meta.migration_column c1 on c1.rid_migration_table = m.id_migration_table_src
            and c1.columnname not in (
                select c2.columnname
                from meta.migration_column c2
                where c2.rid_migration_table = m.id_migration_table_dest
            )
        where u.id_migration_column = c1.id_migration_column
        returning *
    ), relations1 as (
        update meta.migration_relation u
        set rid_migration_table_parent = m.id_migration_table_dest
            , relationname = format('%s_merged', u.relationname)
        from move m
        where u.rid_migration_table_parent = m.id_migration_table_src
        returning *
    ), relations2 as (
        update meta.migration_relation u
        set rid_migration_table_child = m.id_migration_table_dest
            , relationname = format('%s_merged', u.relationname)
        from move m
        where u.rid_migration_table_child = m.id_migration_table_src
        returning *
    ), relationscols as (
        update meta.migration_relation_col u
        set rid_migration_column_child = mc2.id_migration_column
        from move m
        inner join meta.migration_column mc on mc.rid_migration_table = m.id_migration_table_src
        inner join meta.migration_column mc2 on mc2.rid_migration_table = m.id_migration_table_dest
            and mc2.columnname = mc.columnname
        inner join meta.migration_relation_col rc on rc.rid_migration_column_child = mc.id_migration_column
        where u.id_migration_relationcol = rc.id_migration_relationcol
    ), relationscols2 as (
        update meta.migration_relation_col u
        set rid_migration_column_parent = mc2.id_migration_column
        from move m
        inner join meta.migration_column mc on mc.rid_migration_table = m.id_migration_table_src
        inner join meta.migration_column mc2 on mc2.rid_migration_table = m.id_migration_table_dest
            and mc2.columnname = mc.columnname
        inner join meta.migration_relation_col rc on rc.rid_migration_column_parent = mc.id_migration_column
        where u.id_migration_relationcol = rc.id_migration_relationcol
    ), idx as (
        update meta.migration_index u
        set rid_migration_table = m.id_migration_table_dest
        from move m
        where u.rid_migration_table = m.id_migration_table_src
        returning *
    ), idxcols as (
        update meta.migration_index_col u
        set rid_migration_column_parent = col2.id_migration_column
        from move m
        inner join meta.migration_column col on col.rid_migration_table = m.id_migration_table_src
        inner join meta.migration_column col2 on col2.rid_migration_table = m.id_migration_table_dest
            and col2.columnname = col.columnname
        where u.rid_migration_column_parent = col.id_migration_column
    )
    update meta.migration_table u
    set ismodel = false
    from move mv
    where u.id_migration_table = mv.id_migration_table_src;

    update meta.migration_index u
    set indexname = format('%s_%s_%s', u.indexname, tbl.schemaname, tbl.tablename)
    from meta.migration_table tbl
    where u.rid_migration_table = tbl.id_migration_table
        and u.indexname not ilike '%' || tbl.schemaname || '%' || tbl.tablename || '%'
    ;

    --Disable and report duplicates; keep the high-priority schema
    with dup as (
        select
            t.tablename as tablename_src
            , t2.tablename as tablename_dest
            , t.schemaname as schemaname_src
            , t2.schemaname as schemaname_dest
            , t.id_migration_table as id_migration_table_dest
            , t2.id_migration_table as id_migration_table_src
        from meta.migration_table t
        inner join meta.migration_table t2 on t2.ismodel
            and t.tablename = t2.tablename
            and t.schemaname is distinct from t2.schemaname
            and t.schemapriority < t2.schemapriority
        where t.ismodel
            and exists (
                select *
                from meta.migration_option o
                where o.name ilike 'default_schema:%'
                    and (split_part(o.name, ':', 2) = t.tablename
                        or split_part(o.name, ':', 2) = t2.tablename
                    )
            )
    ), upd as (
        update meta.migration_table u
        set ismodel = false
        from dup
        where u.id_migration_table = dup.id_migration_table_src
        returning *
    )
    select jsonb_agg(to_jsonb(dup))
    from dup
    into m_json;

    insert into meta.table_prefix(schemaname, tablename, prefix)
    select distinct t.schemaname, t.tablename, t.prefix
    from meta.migration_table t
    where t.ismodel
        and coalesce(t.prefix, '') <> ''
        and not exists (
            select 1 from meta.table_prefix p where p.schemaname = t.schemaname and p.tablename = t.tablename
        );

    update meta.table_prefix u
    set prefix = t.prefix
    from meta.migration_table t
    where t.ismodel
        and u.tablename = t.tablename
        and u.schemaname = t.schemaname
        and u.prefix is distinct from t.prefix
    ;

    update meta.migration_relation u
    set ismodel = false
    from (
        select max(mr.rid_migration_table_child) as rid_migration_table_child
            , max(mr.rid_migration_table_parent) as rid_migration_table_parent
            , count(1) as cnt
            , array_agg(mr.id_migration_relation) as a_id_migration_relation
            , min(mr.id_migration_relation) as id_migration_relation_keep
            , array_agg(mr.relationname)
            , string_agg(c1.columnname, '_')
        from meta.migration_relation mr
        inner join meta.migration_relation_col rc on rc.rid_migration_relation = mr.id_migration_relation
        inner join meta.migration_table t1 on t1.id_migration_table = mr.rid_migration_table_parent
        inner join meta.migration_table t2 on t2.id_migration_table = mr.rid_migration_table_child
        inner join meta.migration_column c1 on c1.id_migration_column = rc.rid_migration_column_parent
        inner join meta.migration_column c2 on c2.id_migration_column = rc.rid_migration_column_child
        where mr.ismodel
        group by t1.schemaname, t1.tablename, t2.schemaname, t2.tablename, c1.columnname, c2.columnname
        having count(1) > 1
    ) r
    where u.id_migration_relation = any(a_id_migration_relation)
        and u.id_migration_relation <> r.id_migration_relation_keep
    ;

    /*
    update meta.migration_relation u
    set ismodel = false
        ,isdb = false
    from (
        select mr.relationname
            , mr.rid_migration_table_child
            , mr.rid_migration_table_parent
            , count(1) as cnt
            , array_agg(mr.id_migration_relation) as a_id_migration_relation
            , min(mr.id_migration_relation) as id_migration_relation
        from meta.migration_relation mr
        where mr.ismodel
        group by mr.relationname, mr.rid_migration_table_child, mr.rid_migration_table_parent
    ) r
    where u.id_migration_relation = any(r.a_id_migration_relation)
        and u.id_migration_relation is distinct from r.id_migration_relation
    ;
    */

    raise notice 'done @ %', (clock_timestamp() - m_tm)::interval;
    m_tm = clock_timestamp();

    p_info = coalesce(jsonb_concat(p_info, m_json::jsonb), p_info);
    perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',2,'objecttype',m_funcname)::text);

EXCEPTION
    WHEN others THEN
        GET STACKED DIAGNOSTICS
            m_errmsg = MESSAGE_TEXT
            ,m_errcontext = PG_EXCEPTION_CONTEXT
            ,m_errdetail = PG_EXCEPTION_DETAIL
            ,m_errhint = PG_EXCEPTION_HINT
            ,m_errstate = RETURNED_SQLSTATE;

        p_retval = 1;
        p_errmsg = format('%s Context %s State: %s', m_errmsg, m_errcontext, m_errstate);
        raise warning '% % hint:% state:% context:%', m_errmsg, m_errdetail, m_errhint, m_errstate, m_errcontext;
        perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',3,'error',m_errmsg,'objecttype',m_funcname)::text);
END;
$$;

199
examples/pgsql_meta_upgrade/migration_run.sql
Normal file
@@ -0,0 +1,199 @@
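-- migration_run: executes pending rows from meta.migration_script over a dblink
-- connection so each DDL batch commits independently of the caller; failures are
-- recorded on the script row and counted against G_BREAK_ERR_CNT.
-- Illustrative call (all parameters have defaults):
--   select * from meta.migration_run(p_priority_start := 0, p_priority_end := 10000);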
select * from dropall('migration_run','meta');
CREATE OR REPLACE FUNCTION meta.migration_run(
    p_id_migration_model integer default null
    ,p_priority_start integer default 0
    ,p_priority_end integer default 10000
    ,INOUT p_info jsonb default null
    ,OUT p_retval integer
    ,OUT p_errmsg text
)
LANGUAGE plpgsql VOLATILE
SECURITY DEFINER
AS
$$
DECLARE
    --Error Handling--
    m_funcname text = 'meta.migration_run';
    m_errmsg text;
    m_errcontext text;
    m_errdetail text;
    m_errhint text;
    m_errstate text;
    m_retval integer;
    --Error Handling--
    m_dblink_conn TEXT;
    m_dblink_login TEXT;
    m_gotemplate text;
    m_pid integer;
    r_lp record;
    m_errcnt integer = 0;
    G_BREAK_ERR_CNT constant integer = 500;
BEGIN
    m_dblink_conn = 'dblink_migration';
    p_retval = 0;
    p_errmsg = '';
    m_pid = pg_backend_pid();
    p_info = jsonb_build_array();

    m_gotemplate = $CQl$do $OUTERDO$
    declare
        m_lock_timeout text = '1500ms';
        m_max_attempts int = 2;
        m_ddl_completed boolean = false;
    begin
        set local lock_timeout = '1500ms';
        perform set_config('lock_timeout', m_lock_timeout, false);

        for i in 1..m_max_attempts loop
            begin
                execute $EXECMIGRATION$[proc]$EXECMIGRATION$;
                m_ddl_completed = true;
                exit;
            exception when lock_not_available then
                raise warning 'attempt %/% to run [objectname] failed: lock not available', i, m_max_attempts;

                perform pg_sleep(0.1);
            end;
            -- if i > 1
            -- then
            --     SET statement_timeout = 20000;
            -- end if;
        end loop;
    end
    $OUTERDO$;
    $CQl$;

    select 'host=127.0.0.1 dbname=' || r.dbname || ' port=' || r.port || ' user=' || r.loginuser || ' password=' || r.password
    from f_get_local_conn('migration') r
    into m_dblink_login;

    if not exists (SELECT T.text from unnest(dblink_get_connections()) T where T.text = m_dblink_conn)
    then
        PERFORM dblink_connect(m_dblink_conn, m_dblink_login);
    end if;

    perform log_event(m_funcname, format('migration_run init [%s to %s] with connection %s', p_priority_start, p_priority_end, m_dblink_conn), bt_enum('eventlog','upgrade'));

    PERFORM dblink_exec(m_dblink_conn, format($S$set application_name to 'MIGRATION:%s'; $S$, m_pid), false);

    for r_lp in (
        select *
            ,''::text as result
            ,clock_timestamp() as startat
        from meta.migration_script s
        where coalesce(s.status, 0) = 0
            and s.priority >= p_priority_start
            and s.priority <= p_priority_end
            and octet_length(s.body) > 1
        order by s.priority, s.sequence, s.id_migration_script
    )
    loop
        if m_errcnt > G_BREAK_ERR_CNT
        then
            perform log_event(m_funcname, format('migration_run max error count reached [%s to %s]: %s/%s', p_priority_start, p_priority_end, m_errcnt, G_BREAK_ERR_CNT), bt_enum('eventlog','upgrade'));

            exit;
        end if;

        perform dblink_exec(m_dblink_conn, format($Q$ DO $OUTERDO$ BEGIN

            if not meta.f_can_migrate()
            then
                raise exception 'Cannot migrate. Check the f_can_migrate function.';
            end if;

            update meta.migration_script u
            set status = 1
                ,changedat = now()
            where u.id_migration_script = %1$s;

        END; $OUTERDO$; $Q$, r_lp.id_migration_script), true)
        ;

        begin

            --raise notice 'Lp %: %', r_lp.objectname, r_lp.result;
            --raise notice 'Executing % % [%,%]', r_lp.objectname, r_lp.objecttype, r_lp.priority, r_lp.sequence;

            select dblink_exec(m_dblink_conn, replace(replace(replace(m_gotemplate, '[proc]', r_lp.body), '[objectname]', r_lp.objectname), '[priority]', r_lp.priority::text), true)
            into r_lp.result;

            r_lp.duration = clock_timestamp() - r_lp.startat;

            perform log_event(m_funcname, format('Ran script %s @%s', r_lp.objectname, r_lp.priority), bt_enum('eventlog','upgrade'));

            perform dblink_exec(m_dblink_conn, format($Q$
                update meta.migration_script u
                set status = 2
                    ,changedat = now()
                    ,duration = %2$s::interval
                where u.id_migration_script = %1$s;
            $Q$, r_lp.id_migration_script, quote_literal(r_lp.duration)), true)
            ;

            p_info = jsonb_concat(p_info, jsonb_build_object('objectname', r_lp.objectname, 'objecttype', r_lp.objecttype, 'status', 'success'));

        EXCEPTION
            WHEN others THEN
                GET STACKED DIAGNOSTICS
                    m_errmsg = MESSAGE_TEXT;

                raise notice 'Exception % -> %', r_lp.objectname, m_errmsg;
                r_lp.duration = clock_timestamp() - r_lp.startat;
                perform log_event(m_funcname, format('migration_run error %s -> %s >> %s', r_lp.objecttype, r_lp.objectname, m_errmsg), bt_enum('eventlog','local error'));

                perform dblink_exec(m_dblink_conn, format($Q$
                    update meta.migration_script u
                    set status = 3
                        ,error = %2$s || E' \nResult: ' || %3$s
                        ,changedat = now()
                        ,duration = %4$s::interval
                    where u.id_migration_script = %1$s;
                $Q$, r_lp.id_migration_script, quote_literal(m_errmsg), quote_literal(coalesce(r_lp.result, '')), quote_literal(r_lp.duration)), false);

                p_info = jsonb_concat(p_info, jsonb_build_object('objectname', r_lp.objectname, 'objecttype', r_lp.objecttype, 'status', 'error', 'error', m_errmsg));

                m_errcnt = m_errcnt + 1;

        end;

        perform dblink_exec(m_dblink_conn, format($Q$DO $DDD$
            declare
                m_text text;
            begin
                select pg_notify('upgrade.events', json_build_object('type','upgrade','status',m.status,'objecttype', m.objecttype,'objectname',m.objectname)::text)
                from meta.migration_script m
                where m.id_migration_script = %1$s
                into m_text;
            end;
        $DDD$;$Q$, r_lp.id_migration_script), false);

    end loop;

    perform dblink_disconnect(m_dblink_conn);

EXCEPTION
    WHEN others THEN
        GET STACKED DIAGNOSTICS
            m_errmsg = MESSAGE_TEXT
            ,m_errcontext = PG_EXCEPTION_CONTEXT
            ,m_errdetail = PG_EXCEPTION_DETAIL
            ,m_errhint = PG_EXCEPTION_HINT
            ,m_errstate = RETURNED_SQLSTATE;

        p_retval = 1;
        p_errmsg = format('%s Context %s State: %s', m_errmsg, m_errcontext, m_errstate);

        if exists (SELECT T.text from unnest(dblink_get_connections()) T where T.text = m_dblink_conn)
        then
            perform dblink_disconnect(m_dblink_conn);
        end if;

        perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',3,'error',m_errmsg,'objecttype',m_funcname)::text);
END;
$$;

53
examples/pgsql_meta_upgrade/migration_tables_options.sql
Normal file
@@ -0,0 +1,53 @@
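-- Seed or refresh meta.migration_option: the upd CTE updates options that
-- already exist, and the outer insert adds the missing ones (a CTE-based upsert).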
with ops (name, value) as (
    values ('guidtype','uuid')
    ,('max_constraint','30')
    ,('droprename','_dropped_')
    ,('settype_names_jsonb','jsonvalue,jsonbvalue,jsonschema')
    ,('default_schema:hub','core')
    ,('default_schema:hub_link','core')
    --,('default_schema:colour','core')
    --- ,('default_schema:mastertype','core') --Master type must not be set for pgifas
    ,('default_schema:workflowitem','core')
    ,('exclude:public','suppinvoice,suppinvoiceitem,project,generalledger,supporder,subaccount,customerinvoice,custcreditnoteitem,custinvoiceitem,cashbookdetail,supporderitem,bankstatement,journaldetail,custcreditnote,custcreditnoteitem,autoreconstring,Suppcreditnote,suppcreditnoteitem')
    -- ,('default_schema:fiscalyear','core')
    -- ,('default_schema:autoreconstring','core')
    -- ,('default_schema:postalcode','core')
    -- ,('default_schema:cashbookdetail','core')
    -- ,('default_schema:supporderitem','core')
    -- ,('default_schema:accounttype','core')
    -- ,('default_schema:bankstatement','core')
    -- ,('default_schema:journaldetail','core')
    -- ,('default_schema:cashbook','core')
    -- ,('default_schema:custcreditnote','core')
    -- ,('default_schema:custcreditnoteitem','core')
    -- ,('default_schema:custinvoiceitem','core')
    -- ,('default_schema:customerinvoice','core')
    -- ,('default_schema:generalledger','core')
    -- ,('default_schema:journal','core')
    -- ,('default_schema:suppcreditnote','core')
    -- ,('default_schema:suppcreditnoteitem','core')
    -- ,('default_schema:suppinvoice','core')
    -- ,('default_schema:suppinvoiceitem','core')
    -- ,('default_schema:supporder','core')
    -- ,('default_schema:subaccount','core')
    -- ,('default_schema:project','core')
    -- ,('default_schema:period','core')
), upd as (
    update meta.migration_option o
    set value = ops.value
    from ops
    where o.name = ops.name
    returning *
)
insert into meta.migration_option(name, value)
select ops.name, ops.value from ops
where not exists (
    select *
    from meta.migration_option o
    where o.name = ops.name
);

delete from meta.migration_option o
where o.name in ('default_schema:mastertype','default_schema:colour')
;

96
examples/pgsql_reader_example.go
Normal file
@@ -0,0 +1,96 @@
package main

import (
	"fmt"
	"log"

	"git.warky.dev/wdevs/relspecgo/pkg/readers"
	"git.warky.dev/wdevs/relspecgo/pkg/readers/pgsql"
)

func main() {
	// Example PostgreSQL connection string
	// Format: postgres://username:password@localhost:5432/database_name
	connectionString := "postgres://user:password@localhost:5432/mydb"

	// Create reader options
	options := &readers.ReaderOptions{
		ConnectionString: connectionString,
	}

	// Create PostgreSQL reader
	reader := pgsql.NewReader(options)

	// Read the entire database
	db, err := reader.ReadDatabase()
	if err != nil {
		log.Fatalf("Failed to read database: %v", err)
	}

	// Display database information
	fmt.Printf("Database: %s\n", db.Name)
	fmt.Printf("Type: %s\n", db.DatabaseType)
	fmt.Printf("Version: %s\n", db.DatabaseVersion)
	fmt.Printf("Schemas: %d\n\n", len(db.Schemas))

	// Iterate through schemas
	for _, schema := range db.Schemas {
		fmt.Printf("Schema: %s\n", schema.Name)
		fmt.Printf("  Tables: %d\n", len(schema.Tables))
		fmt.Printf("  Views: %d\n", len(schema.Views))
		fmt.Printf("  Sequences: %d\n", len(schema.Sequences))

		// Display table details
		for _, table := range schema.Tables {
			fmt.Printf("  Table: %s.%s\n", schema.Name, table.Name)
			fmt.Printf("    Columns: %d\n", len(table.Columns))
			fmt.Printf("    Constraints: %d\n", len(table.Constraints))
			fmt.Printf("    Indexes: %d\n", len(table.Indexes))
			fmt.Printf("    Relationships: %d\n", len(table.Relationships))

			// Display columns
			for _, col := range table.Columns {
				fmt.Printf("    - %s: %s", col.Name, col.Type)
				if col.IsPrimaryKey {
					fmt.Printf(" [PK]")
				}
				if col.NotNull {
					fmt.Printf(" NOT NULL")
				}
				fmt.Println()
			}
		}

		// Display view details
		for _, view := range schema.Views {
			fmt.Printf("  View: %s.%s\n", schema.Name, view.Name)
			fmt.Printf("    Columns: %d\n", len(view.Columns))
			fmt.Printf("    Definition: %s\n", view.Definition)
		}

		// Display sequence details
		for _, seq := range schema.Sequences {
			fmt.Printf("  Sequence: %s.%s\n", schema.Name, seq.Name)
			fmt.Printf("    Start: %d, Increment: %d\n", seq.StartValue, seq.IncrementBy)
			if seq.OwnedByTable != "" {
				fmt.Printf("    Owned by: %s.%s\n", seq.OwnedByTable, seq.OwnedByColumn)
			}
		}

		fmt.Println()
	}

	// Example: Read just the first schema
	schema, err := reader.ReadSchema()
	if err != nil {
		log.Fatalf("Failed to read schema: %v", err)
	}
	fmt.Printf("First schema: %s\n", schema.Name)

	// Example: Read just the first table
	table, err := reader.ReadTable()
	if err != nil {
		log.Fatalf("Failed to read table: %v", err)
	}
	fmt.Printf("First table: %s.%s\n", table.Schema, table.Name)
}

4
go.mod
4
go.mod
@@ -7,6 +7,9 @@ require (
	github.com/fsnotify/fsnotify v1.9.0 // indirect
	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/jackc/pgpassfile v1.0.0 // indirect
	github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
	github.com/jackc/pgx/v5 v5.7.6 // indirect
	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
	github.com/sagikazarmark/locafero v0.11.0 // indirect
	github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
@@ -17,6 +20,7 @@ require (
	github.com/spf13/viper v1.21.0 // indirect
	github.com/subosito/gotenv v1.6.0 // indirect
	go.yaml.in/yaml/v3 v3.0.4 // indirect
	golang.org/x/crypto v0.41.0 // indirect
	golang.org/x/sys v0.35.0 // indirect
	golang.org/x/text v0.28.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
14
go.sum
14
go.sum
@@ -1,14 +1,22 @@
github.com/bitechdev/ResolveSpec v0.0.108 h1:0Asw4zt9SdBIDprNqtrGY67R4SovAPBmW2y1qRn/Wjw=
github.com/bitechdev/ResolveSpec v0.0.108/go.mod h1:/mtVcbXSBLNmWlTKeDnbQx18tmNqOnrpetpLOadLzqo=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
@@ -25,15 +33,21 @@ github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -30,6 +30,8 @@ type Schema struct {
	Name        string            `json:"name" yaml:"name" xml:"name"`
	Description string            `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
	Tables      []*Table          `json:"tables" yaml:"tables" xml:"-"`
	Views       []*View           `json:"views,omitempty" yaml:"views,omitempty" xml:"-"`
	Sequences   []*Sequence       `json:"sequences,omitempty" yaml:"sequences,omitempty" xml:"-"`
	Owner       string            `json:"owner" yaml:"owner" xml:"owner"`
	Permissions map[string]string `json:"permissions,omitempty" yaml:"permissions,omitempty" xml:"-"`
	Comment     string            `json:"comment,omitempty" yaml:"comment,omitempty" xml:"comment,omitempty"`
@@ -84,6 +86,47 @@ func (m Table) GetForeignKeys() []*Constraint {
	return keys
}

// View represents a database view (read-only)
type View struct {
	Name        string             `json:"name" yaml:"name" xml:"name"`
	Description string             `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
	Schema      string             `json:"schema" yaml:"schema" xml:"schema"`
	Definition  string             `json:"definition" yaml:"definition" xml:"definition"` // SQL definition
	Columns     map[string]*Column `json:"columns" yaml:"columns" xml:"-"`
	Comment     string             `json:"comment,omitempty" yaml:"comment,omitempty" xml:"comment,omitempty"`
	Metadata    map[string]any     `json:"metadata,omitempty" yaml:"metadata,omitempty" xml:"-"`
	Sequence    uint               `json:"sequence,omitempty" yaml:"sequence,omitempty" xml:"sequence,omitempty"`
	RefSchema   *Schema            `json:"ref_schema,omitempty" yaml:"ref_schema,omitempty" xml:"ref_schema,omitempty"`
}

// SQLName returns the view name in lowercase
func (d *View) SQLName() string {
	return strings.ToLower(d.Name)
}

// Sequence represents a database sequence (auto-increment generator)
type Sequence struct {
	Name          string  `json:"name" yaml:"name" xml:"name"`
	Description   string  `json:"description,omitempty" yaml:"description,omitempty" xml:"description,omitempty"`
	Schema        string  `json:"schema" yaml:"schema" xml:"schema"`
	StartValue    int64   `json:"start_value" yaml:"start_value" xml:"start_value"`
	MinValue      int64   `json:"min_value,omitempty" yaml:"min_value,omitempty" xml:"min_value,omitempty"`
	MaxValue      int64   `json:"max_value,omitempty" yaml:"max_value,omitempty" xml:"max_value,omitempty"`
	IncrementBy   int64   `json:"increment_by" yaml:"increment_by" xml:"increment_by"`
	CacheSize     int64   `json:"cache_size,omitempty" yaml:"cache_size,omitempty" xml:"cache_size,omitempty"`
	Cycle         bool    `json:"cycle" yaml:"cycle" xml:"cycle"`
	OwnedByTable  string  `json:"owned_by_table,omitempty" yaml:"owned_by_table,omitempty" xml:"owned_by_table,omitempty"`
	OwnedByColumn string  `json:"owned_by_column,omitempty" yaml:"owned_by_column,omitempty" xml:"owned_by_column,omitempty"`
	Comment       string  `json:"comment,omitempty" yaml:"comment,omitempty" xml:"comment,omitempty"`
	Sequence      uint    `json:"sequence,omitempty" yaml:"sequence,omitempty" xml:"sequence,omitempty"`
	RefSchema     *Schema `json:"ref_schema,omitempty" yaml:"ref_schema,omitempty" xml:"ref_schema,omitempty"`
}

// SQLName returns the sequence name in lowercase
func (d *Sequence) SQLName() string {
	return strings.ToLower(d.Name)
}

// Column represents a table column
type Column struct {
	Name string `json:"name" yaml:"name" xml:"name"`
@@ -218,6 +261,8 @@ func InitSchema(name string) *Schema {
	return &Schema{
		Name:        name,
		Tables:      make([]*Table, 0),
		Views:       make([]*View, 0),
		Sequences:   make([]*Sequence, 0),
		Permissions: make(map[string]string),
		Metadata:    make(map[string]any),
		Scripts:     make([]*Script, 0),
@@ -281,3 +326,23 @@ func InitScript(name string) *Script {
		RunAfter: make([]string, 0),
	}
}

// InitView initializes a new View with empty maps
func InitView(name, schema string) *View {
	return &View{
		Name:     name,
		Schema:   schema,
		Columns:  make(map[string]*Column),
		Metadata: make(map[string]any),
	}
}

// InitSequence initializes a new Sequence with default values
func InitSequence(name, schema string) *Sequence {
	return &Sequence{
		Name:        name,
		Schema:      schema,
		IncrementBy: 1,
		StartValue:  1,
	}
}
600
pkg/readers/pgsql/queries.go
Normal file
600
pkg/readers/pgsql/queries.go
Normal file
@@ -0,0 +1,600 @@
package pgsql

import (
	"regexp"
	"strings"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
)

// querySchemas retrieves all non-system schemas from the database
func (r *Reader) querySchemas() ([]*models.Schema, error) {
	query := `
		SELECT
			nspname as schema_name,
			obj_description(oid, 'pg_namespace') as description
		FROM pg_namespace
		WHERE nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
			AND nspname NOT LIKE 'pg_temp_%'
			AND nspname NOT LIKE 'pg_toast_temp_%'
		ORDER BY nspname
	`

	rows, err := r.conn.Query(r.ctx, query)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	schemas := make([]*models.Schema, 0)
	for rows.Next() {
		var name string
		var description *string

		if err := rows.Scan(&name, &description); err != nil {
			return nil, err
		}

		schema := models.InitSchema(name)
		if description != nil {
			schema.Description = *description
		}

		schemas = append(schemas, schema)
	}

	return schemas, rows.Err()
}

// queryTables retrieves all tables for a given schema
func (r *Reader) queryTables(schemaName string) ([]*models.Table, error) {
	query := `
		SELECT
			schemaname,
			tablename,
			obj_description((schemaname||'.'||tablename)::regclass, 'pg_class') as description
		FROM pg_tables
		WHERE schemaname = $1
		ORDER BY tablename
	`

	rows, err := r.conn.Query(r.ctx, query, schemaName)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	tables := make([]*models.Table, 0)
	for rows.Next() {
		var schema, tableName string
		var description *string

		if err := rows.Scan(&schema, &tableName, &description); err != nil {
			return nil, err
		}

		table := models.InitTable(tableName, schema)
		if description != nil {
			table.Description = *description
		}

		tables = append(tables, table)
	}

	return tables, rows.Err()
}

// queryViews retrieves all views for a given schema
func (r *Reader) queryViews(schemaName string) ([]*models.View, error) {
	query := `
		SELECT
			schemaname,
			viewname,
			definition,
			obj_description((schemaname||'.'||viewname)::regclass, 'pg_class') as description
		FROM pg_views
		WHERE schemaname = $1
		ORDER BY viewname
	`

	rows, err := r.conn.Query(r.ctx, query, schemaName)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	views := make([]*models.View, 0)
	for rows.Next() {
		var schema, viewName, definition string
		var description *string

		if err := rows.Scan(&schema, &viewName, &definition, &description); err != nil {
			return nil, err
		}

		view := models.InitView(viewName, schema)
		view.Definition = definition
		if description != nil {
			view.Description = *description
		}

		views = append(views, view)
	}

	return views, rows.Err()
}

// querySequences retrieves all sequences for a given schema
func (r *Reader) querySequences(schemaName string) ([]*models.Sequence, error) {
	query := `
		SELECT
			ps.schemaname,
			ps.sequencename,
			ps.start_value,
			ps.min_value,
			ps.max_value,
			ps.increment_by,
			ps.cycle,
			ps.cache_size,
			obj_description((ps.schemaname||'.'||ps.sequencename)::regclass, 'pg_class') as description,
			owner_table.relname as owned_by_table,
			owner_column.attname as owned_by_column
		FROM pg_sequences ps
		LEFT JOIN pg_class seq_class ON seq_class.relname = ps.sequencename
			AND seq_class.relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = ps.schemaname)
		LEFT JOIN pg_depend ON pg_depend.objid = seq_class.oid AND pg_depend.deptype = 'a'
		LEFT JOIN pg_class owner_table ON pg_depend.refobjid = owner_table.oid
		LEFT JOIN pg_attribute owner_column ON pg_depend.refobjid = owner_column.attrelid
			AND pg_depend.refobjsubid = owner_column.attnum
		WHERE ps.schemaname = $1
		ORDER BY ps.sequencename
	`

	rows, err := r.conn.Query(r.ctx, query, schemaName)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	sequences := make([]*models.Sequence, 0)
	for rows.Next() {
		var schema, seqName string
		var startValue, minValue, maxValue, incrementBy, cacheSize int64
		var cycle bool
		var description, tableName, columnName *string

		if err := rows.Scan(&schema, &seqName, &startValue, &minValue, &maxValue, &incrementBy, &cycle, &cacheSize, &description, &tableName, &columnName); err != nil {
			return nil, err
		}

		seq := models.InitSequence(seqName, schema)
		seq.StartValue = startValue
		seq.MinValue = minValue
		seq.MaxValue = maxValue
		seq.IncrementBy = incrementBy
		seq.Cycle = cycle
		seq.CacheSize = cacheSize
		if description != nil {
			seq.Description = *description
		}
		if tableName != nil {
			seq.OwnedByTable = *tableName
		}
		if columnName != nil {
			seq.OwnedByColumn = *columnName
		}

		sequences = append(sequences, seq)
	}

	return sequences, rows.Err()
}

// queryColumns retrieves all columns for tables and views in a schema
// Returns map[schema.table]map[columnName]*Column
func (r *Reader) queryColumns(schemaName string) (map[string]map[string]*models.Column, error) {
	query := `
		SELECT
			c.table_schema,
			c.table_name,
			c.column_name,
			c.ordinal_position,
			c.column_default,
			c.is_nullable,
			c.data_type,
			c.character_maximum_length,
			c.numeric_precision,
			c.numeric_scale,
			c.udt_name,
			col_description((c.table_schema||'.'||c.table_name)::regclass, c.ordinal_position) as description
		FROM information_schema.columns c
		WHERE c.table_schema = $1
		ORDER BY c.table_schema, c.table_name, c.ordinal_position
	`

	rows, err := r.conn.Query(r.ctx, query, schemaName)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	columnsMap := make(map[string]map[string]*models.Column)

	for rows.Next() {
		var schema, tableName, columnName, isNullable, dataType, udtName string
		var ordinalPosition int
		var columnDefault, description *string
		var charMaxLength, numPrecision, numScale *int

		if err := rows.Scan(&schema, &tableName, &columnName, &ordinalPosition, &columnDefault, &isNullable, &dataType, &charMaxLength, &numPrecision, &numScale, &udtName, &description); err != nil {
			return nil, err
		}

		column := models.InitColumn(columnName, tableName, schema)
		column.Type = r.mapDataType(dataType, udtName)
		column.NotNull = (isNullable == "NO")
		column.Sequence = uint(ordinalPosition)

		if columnDefault != nil {
			// Parse default value - remove nextval for sequences
			defaultVal := *columnDefault
			if strings.HasPrefix(defaultVal, "nextval") {
				column.AutoIncrement = true
				column.Default = defaultVal
			} else {
				column.Default = defaultVal
			}
		}

		if description != nil {
			column.Description = *description
		}

		if charMaxLength != nil {
			column.Length = *charMaxLength
		}

		if numPrecision != nil {
			column.Precision = *numPrecision
		}

		if numScale != nil {
			column.Scale = *numScale
		}

		// Create table key
		tableKey := schema + "." + tableName
		if columnsMap[tableKey] == nil {
			columnsMap[tableKey] = make(map[string]*models.Column)
		}
		columnsMap[tableKey][columnName] = column
	}

	return columnsMap, rows.Err()
}

// queryPrimaryKeys retrieves all primary key constraints for a schema
// Returns map[schema.table]*Constraint
func (r *Reader) queryPrimaryKeys(schemaName string) (map[string]*models.Constraint, error) {
	query := `
		SELECT
			tc.table_schema,
			tc.table_name,
			tc.constraint_name,
			array_agg(kcu.column_name ORDER BY kcu.ordinal_position) as columns
		FROM information_schema.table_constraints tc
		JOIN information_schema.key_column_usage kcu
			ON tc.constraint_name = kcu.constraint_name
			AND tc.table_schema = kcu.table_schema
		WHERE tc.constraint_type = 'PRIMARY KEY'
			AND tc.table_schema = $1
		GROUP BY tc.table_schema, tc.table_name, tc.constraint_name
	`

	rows, err := r.conn.Query(r.ctx, query, schemaName)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	primaryKeys := make(map[string]*models.Constraint)

	for rows.Next() {
		var schema, tableName, constraintName string
		var columns []string

		if err := rows.Scan(&schema, &tableName, &constraintName, &columns); err != nil {
			return nil, err
		}

		constraint := models.InitConstraint(constraintName, models.PrimaryKeyConstraint)
		constraint.Schema = schema
		constraint.Table = tableName
		constraint.Columns = columns

		tableKey := schema + "." + tableName
		primaryKeys[tableKey] = constraint
	}

	return primaryKeys, rows.Err()
}

// queryForeignKeys retrieves all foreign key constraints for a schema
// Returns map[schema.table][]*Constraint
func (r *Reader) queryForeignKeys(schemaName string) (map[string][]*models.Constraint, error) {
	query := `
		SELECT
			tc.table_schema,
			tc.table_name,
			tc.constraint_name,
			kcu.table_schema as foreign_table_schema,
			kcu.table_name as foreign_table_name,
			kcu.column_name as foreign_column,
			ccu.table_schema as referenced_table_schema,
			ccu.table_name as referenced_table_name,
			ccu.column_name as referenced_column,
			rc.update_rule,
			rc.delete_rule
		FROM information_schema.table_constraints tc
		JOIN information_schema.key_column_usage kcu
			ON tc.constraint_name = kcu.constraint_name
			AND tc.table_schema = kcu.table_schema
		JOIN information_schema.constraint_column_usage ccu
			ON ccu.constraint_name = tc.constraint_name
		JOIN information_schema.referential_constraints rc
			ON rc.constraint_name = tc.constraint_name
			AND rc.constraint_schema = tc.table_schema
		WHERE tc.constraint_type = 'FOREIGN KEY'
			AND tc.table_schema = $1
		ORDER BY tc.table_schema, tc.table_name, tc.constraint_name, kcu.ordinal_position
	`

	rows, err := r.conn.Query(r.ctx, query, schemaName)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	// First pass: collect all FK data
	type fkData struct {
		schema            string
		tableName         string
		constraintName    string
		foreignColumns    []string
		referencedSchema  string
		referencedTable   string
		referencedColumns []string
		updateRule        string
		deleteRule        string
	}

	fkMap := make(map[string]*fkData)

	for rows.Next() {
		var schema, tableName, constraintName string
		var foreignSchema, foreignTable, foreignColumn string
		var referencedSchema, referencedTable, referencedColumn string
		var updateRule, deleteRule string

		if err := rows.Scan(&schema, &tableName, &constraintName, &foreignSchema, &foreignTable, &foreignColumn, &referencedSchema, &referencedTable, &referencedColumn, &updateRule, &deleteRule); err != nil {
			return nil, err
		}

		key := schema + "." + tableName + "." + constraintName

		if _, exists := fkMap[key]; !exists {
			fkMap[key] = &fkData{
				schema:            schema,
				tableName:         tableName,
				constraintName:    constraintName,
				foreignColumns:    []string{},
				referencedSchema:  referencedSchema,
				referencedTable:   referencedTable,
				referencedColumns: []string{},
				updateRule:        updateRule,
				deleteRule:        deleteRule,
			}
		}

		fkMap[key].foreignColumns = append(fkMap[key].foreignColumns, foreignColumn)
		fkMap[key].referencedColumns = append(fkMap[key].referencedColumns, referencedColumn)
	}

	// Second pass: create constraints
	foreignKeys := make(map[string][]*models.Constraint)

	for _, fk := range fkMap {
		constraint := models.InitConstraint(fk.constraintName, models.ForeignKeyConstraint)
		constraint.Schema = fk.schema
		constraint.Table = fk.tableName
		constraint.Columns = fk.foreignColumns
		constraint.ReferencedSchema = fk.referencedSchema
		constraint.ReferencedTable = fk.referencedTable
		constraint.ReferencedColumns = fk.referencedColumns
		constraint.OnUpdate = fk.updateRule
		constraint.OnDelete = fk.deleteRule

		tableKey := fk.schema + "." + fk.tableName
		foreignKeys[tableKey] = append(foreignKeys[tableKey], constraint)
	}

	return foreignKeys, rows.Err()
}

// queryUniqueConstraints retrieves all unique constraints for a schema
// Returns map[schema.table][]*Constraint
func (r *Reader) queryUniqueConstraints(schemaName string) (map[string][]*models.Constraint, error) {
	query := `
		SELECT
			tc.table_schema,
			tc.table_name,
			tc.constraint_name,
			array_agg(kcu.column_name ORDER BY kcu.ordinal_position) as columns
		FROM information_schema.table_constraints tc
		JOIN information_schema.key_column_usage kcu
			ON tc.constraint_name = kcu.constraint_name
			AND tc.table_schema = kcu.table_schema
		WHERE tc.constraint_type = 'UNIQUE'
			AND tc.table_schema = $1
		GROUP BY tc.table_schema, tc.table_name, tc.constraint_name
	`

	rows, err := r.conn.Query(r.ctx, query, schemaName)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	uniqueConstraints := make(map[string][]*models.Constraint)

	for rows.Next() {
		var schema, tableName, constraintName string
		var columns []string

		if err := rows.Scan(&schema, &tableName, &constraintName, &columns); err != nil {
			return nil, err
		}

		constraint := models.InitConstraint(constraintName, models.UniqueConstraint)
		constraint.Schema = schema
		constraint.Table = tableName
		constraint.Columns = columns

		tableKey := schema + "." + tableName
		uniqueConstraints[tableKey] = append(uniqueConstraints[tableKey], constraint)
	}

	return uniqueConstraints, rows.Err()
}

// queryCheckConstraints retrieves all check constraints for a schema
// Returns map[schema.table][]*Constraint
func (r *Reader) queryCheckConstraints(schemaName string) (map[string][]*models.Constraint, error) {
	query := `
		SELECT
			tc.table_schema,
			tc.table_name,
			tc.constraint_name,
			cc.check_clause
		FROM information_schema.table_constraints tc
		JOIN information_schema.check_constraints cc
			ON tc.constraint_name = cc.constraint_name
		WHERE tc.constraint_type = 'CHECK'
			AND tc.table_schema = $1
	`

	rows, err := r.conn.Query(r.ctx, query, schemaName)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	checkConstraints := make(map[string][]*models.Constraint)

	for rows.Next() {
		var schema, tableName, constraintName, checkClause string

		if err := rows.Scan(&schema, &tableName, &constraintName, &checkClause); err != nil {
			return nil, err
		}

		constraint := models.InitConstraint(constraintName, models.CheckConstraint)
		constraint.Schema = schema
		constraint.Table = tableName
		constraint.Expression = checkClause

		tableKey := schema + "." + tableName
		checkConstraints[tableKey] = append(checkConstraints[tableKey], constraint)
	}

	return checkConstraints, rows.Err()
}

// queryIndexes retrieves all indexes for a schema
// Returns map[schema.table][]*Index
func (r *Reader) queryIndexes(schemaName string) (map[string][]*models.Index, error) {
	query := `
		SELECT
			schemaname,
			tablename,
			indexname,
			indexdef
		FROM pg_indexes
		WHERE schemaname = $1
		ORDER BY schemaname, tablename, indexname
	`

	rows, err := r.conn.Query(r.ctx, query, schemaName)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	indexes := make(map[string][]*models.Index)

	for rows.Next() {
		var schema, tableName, indexName, indexDef string

		if err := rows.Scan(&schema, &tableName, &indexName, &indexDef); err != nil {
			return nil, err
		}

		index, err := r.parseIndexDefinition(indexName, tableName, schema, indexDef)
		if err != nil {
			// If parsing fails, create a basic index
			index = models.InitIndex(indexName)
			index.Table = tableName
			index.Schema = schema
		}

		tableKey := schema + "." + tableName
		indexes[tableKey] = append(indexes[tableKey], index)
	}

	return indexes, rows.Err()
}

// parseIndexDefinition parses a PostgreSQL index definition
func (r *Reader) parseIndexDefinition(indexName, tableName, schema, indexDef string) (*models.Index, error) {
	index := models.InitIndex(indexName)
	index.Table = tableName
	index.Schema = schema

	// Check if unique
	if strings.Contains(strings.ToUpper(indexDef), "UNIQUE") {
		index.Unique = true
	}

	// Extract index method (USING btree, hash, gin, gist, etc.)
	usingRegex := regexp.MustCompile(`USING\s+(\w+)`)
	if matches := usingRegex.FindStringSubmatch(indexDef); len(matches) > 1 {
		index.Type = strings.ToLower(matches[1])
	} else {
		index.Type = "btree" // default
	}

	// Extract columns - pattern: (column1, column2, ...)
	columnsRegex := regexp.MustCompile(`\(([^)]+)\)`)
	if matches := columnsRegex.FindStringSubmatch(indexDef); len(matches) > 1 {
		columnsStr := matches[1]
		// Split by comma and clean up
		columnParts := strings.Split(columnsStr, ",")
		for _, col := range columnParts {
			col = strings.TrimSpace(col)
			// Remove any ordering (ASC/DESC) or other modifiers
			col = strings.Fields(col)[0]
			// Remove parentheses if it's an expression
			if !strings.Contains(col, "(") {
				index.Columns = append(index.Columns, col)
			}
		}
	}

	// Extract WHERE clause for partial indexes
	whereRegex := regexp.MustCompile(`WHERE\s+(.+)$`)
	if matches := whereRegex.FindStringSubmatch(indexDef); len(matches) > 1 {
		index.Where = strings.TrimSpace(matches[1])
	}

	return index, nil
}
346
pkg/readers/pgsql/reader.go
Normal file
346
pkg/readers/pgsql/reader.go
Normal file
@@ -0,0 +1,346 @@
package pgsql

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5"
	"git.warky.dev/wdevs/relspecgo/pkg/models"
	"git.warky.dev/wdevs/relspecgo/pkg/pgsql"
	"git.warky.dev/wdevs/relspecgo/pkg/readers"
)

// Reader implements the readers.Reader interface for PostgreSQL databases
type Reader struct {
	options *readers.ReaderOptions
	conn    *pgx.Conn
	ctx     context.Context
}

// NewReader creates a new PostgreSQL reader
func NewReader(options *readers.ReaderOptions) *Reader {
	return &Reader{
		options: options,
		ctx:     context.Background(),
	}
}

// ReadDatabase reads the entire database schema from PostgreSQL
func (r *Reader) ReadDatabase() (*models.Database, error) {
	// Validate connection string
	if r.options.ConnectionString == "" {
		return nil, fmt.Errorf("connection string is required")
	}

	// Connect to the database
	if err := r.connect(); err != nil {
		return nil, fmt.Errorf("failed to connect: %w", err)
	}
	defer r.close()

	// Get database name from connection
	var dbName string
	err := r.conn.QueryRow(r.ctx, "SELECT current_database()").Scan(&dbName)
	if err != nil {
		return nil, fmt.Errorf("failed to get database name: %w", err)
	}

	// Initialize database model
	db := models.InitDatabase(dbName)
	db.DatabaseType = models.PostgresqlDatabaseType
	db.SourceFormat = "pgsql"

	// Get PostgreSQL version
	var version string
	err = r.conn.QueryRow(r.ctx, "SELECT version()").Scan(&version)
	if err == nil {
		db.DatabaseVersion = version
	}

	// Query all schemas
	schemas, err := r.querySchemas()
	if err != nil {
		return nil, fmt.Errorf("failed to query schemas: %w", err)
	}

	// Process each schema
	for _, schema := range schemas {
		// Query tables for this schema
		tables, err := r.queryTables(schema.Name)
		if err != nil {
			return nil, fmt.Errorf("failed to query tables for schema %s: %w", schema.Name, err)
		}
		schema.Tables = tables

		// Query views for this schema
		views, err := r.queryViews(schema.Name)
		if err != nil {
			return nil, fmt.Errorf("failed to query views for schema %s: %w", schema.Name, err)
		}
		schema.Views = views

		// Query sequences for this schema
		sequences, err := r.querySequences(schema.Name)
		if err != nil {
			return nil, fmt.Errorf("failed to query sequences for schema %s: %w", schema.Name, err)
		}
		schema.Sequences = sequences

		// Query columns for tables and views
		columnsMap, err := r.queryColumns(schema.Name)
		if err != nil {
			return nil, fmt.Errorf("failed to query columns for schema %s: %w", schema.Name, err)
		}

		// Populate table columns
		for _, table := range schema.Tables {
			tableKey := schema.Name + "." + table.Name
			if cols, exists := columnsMap[tableKey]; exists {
				table.Columns = cols
			}
		}

		// Populate view columns
		for _, view := range schema.Views {
			viewKey := schema.Name + "." + view.Name
			if cols, exists := columnsMap[viewKey]; exists {
				view.Columns = cols
			}
		}

		// Query primary keys
		primaryKeys, err := r.queryPrimaryKeys(schema.Name)
		if err != nil {
			return nil, fmt.Errorf("failed to query primary keys for schema %s: %w", schema.Name, err)
		}

		// Apply primary keys to tables
		for _, table := range schema.Tables {
			tableKey := schema.Name + "." + table.Name
			if pk, exists := primaryKeys[tableKey]; exists {
				table.Constraints[pk.Name] = pk
				// Mark columns as primary key and not null
				for _, colName := range pk.Columns {
					if col, colExists := table.Columns[colName]; colExists {
						col.IsPrimaryKey = true
						col.NotNull = true
					}
				}
			}
		}

		// Query foreign keys
		foreignKeys, err := r.queryForeignKeys(schema.Name)
		if err != nil {
			return nil, fmt.Errorf("failed to query foreign keys for schema %s: %w", schema.Name, err)
		}

		// Apply foreign keys to tables
		for _, table := range schema.Tables {
			tableKey := schema.Name + "." + table.Name
			if fks, exists := foreignKeys[tableKey]; exists {
				for _, fk := range fks {
					table.Constraints[fk.Name] = fk
					// Derive relationship from foreign key
					r.deriveRelationship(table, fk)
				}
			}
		}

		// Query unique constraints
		uniqueConstraints, err := r.queryUniqueConstraints(schema.Name)
		if err != nil {
			return nil, fmt.Errorf("failed to query unique constraints for schema %s: %w", schema.Name, err)
		}

		// Apply unique constraints to tables
		for _, table := range schema.Tables {
			tableKey := schema.Name + "." + table.Name
			if ucs, exists := uniqueConstraints[tableKey]; exists {
				for _, uc := range ucs {
					table.Constraints[uc.Name] = uc
				}
			}
		}

		// Query check constraints
		checkConstraints, err := r.queryCheckConstraints(schema.Name)
		if err != nil {
			return nil, fmt.Errorf("failed to query check constraints for schema %s: %w", schema.Name, err)
		}

		// Apply check constraints to tables
		for _, table := range schema.Tables {
			tableKey := schema.Name + "." + table.Name
			if ccs, exists := checkConstraints[tableKey]; exists {
				for _, cc := range ccs {
					table.Constraints[cc.Name] = cc
				}
			}
		}

		// Query indexes
		indexes, err := r.queryIndexes(schema.Name)
		if err != nil {
			return nil, fmt.Errorf("failed to query indexes for schema %s: %w", schema.Name, err)
		}

		// Apply indexes to tables
		for _, table := range schema.Tables {
			tableKey := schema.Name + "." + table.Name
			if idxs, exists := indexes[tableKey]; exists {
				for _, idx := range idxs {
					table.Indexes[idx.Name] = idx
				}
			}
		}

		// Set RefDatabase for schema
		schema.RefDatabase = db

		// Set RefSchema for tables and views
		for _, table := range schema.Tables {
			table.RefSchema = schema
		}
		for _, view := range schema.Views {
			view.RefSchema = schema
		}
		for _, seq := range schema.Sequences {
			seq.RefSchema = schema
		}

		// Add schema to database
		db.Schemas = append(db.Schemas, schema)
	}

	return db, nil
}

// ReadSchema reads a single schema (returns the first schema from the database)
func (r *Reader) ReadSchema() (*models.Schema, error) {
	db, err := r.ReadDatabase()
	if err != nil {
		return nil, err
	}
	if len(db.Schemas) == 0 {
		return nil, fmt.Errorf("no schemas found in database")
	}
	return db.Schemas[0], nil
}

// ReadTable reads a single table (returns the first table from the first schema)
func (r *Reader) ReadTable() (*models.Table, error) {
	schema, err := r.ReadSchema()
	if err != nil {
		return nil, err
	}
	if len(schema.Tables) == 0 {
		return nil, fmt.Errorf("no tables found in schema")
	}
	return schema.Tables[0], nil
}

// connect establishes a connection to the PostgreSQL database
func (r *Reader) connect() error {
	conn, err := pgx.Connect(r.ctx, r.options.ConnectionString)
	if err != nil {
		return err
	}
	r.conn = conn
	return nil
}

// close closes the database connection
func (r *Reader) close() {
	if r.conn != nil {
		r.conn.Close(r.ctx)
	}
}

// mapDataType maps PostgreSQL data types to canonical types
func (r *Reader) mapDataType(pgType, udtName string) string {
	// Map common PostgreSQL types
	typeMap := map[string]string{
		"integer":                     "int",
		"bigint":                      "int64",
		"smallint":                    "int16",
		"int":                         "int",
		"int2":                        "int16",
		"int4":                        "int",
		"int8":                        "int64",
		"serial":                      "int",
		"bigserial":                   "int64",
		"smallserial":                 "int16",
		"numeric":                     "decimal",
		"decimal":                     "decimal",
		"real":                        "float32",
		"double precision":            "float64",
		"float4":                      "float32",
		"float8":                      "float64",
		"money":                       "decimal",
		"character varying":           "string",
		"varchar":                     "string",
		"character":                   "string",
		"char":                        "string",
		"text":                        "string",
		"boolean":                     "bool",
		"bool":                        "bool",
		"date":                        "date",
		"time":                        "time",
		"time without time zone":      "time",
		"time with time zone":         "timetz",
		"timestamp":                   "timestamp",
		"timestamp without time zone": "timestamp",
		"timestamp with time zone":    "timestamptz",
		"timestamptz":                 "timestamptz",
		"interval":                    "interval",
		"uuid":                        "uuid",
		"json":                        "json",
		"jsonb":                       "jsonb",
		"bytea":                       "bytea",
		"inet":                        "inet",
		"cidr":                        "cidr",
		"macaddr":                     "macaddr",
		"xml":                         "xml",
	}

	// Try mapped type first
	if mapped, exists := typeMap[pgType]; exists {
		return mapped
	}

	// Use pgsql utilities if available
	if pgsql.ValidSQLType(pgType) {
		return pgsql.GetSQLType(pgType)
	}

	// Return UDT name for custom types
	if udtName != "" {
		return udtName
	}

	// Default to the original type
	return pgType
}

// deriveRelationship creates a relationship from a foreign key constraint
func (r *Reader) deriveRelationship(table *models.Table, fk *models.Constraint) {
	relationshipName := fmt.Sprintf("%s_to_%s", table.Name, fk.ReferencedTable)

	relationship := models.InitRelationship(relationshipName, models.OneToMany)
	relationship.FromTable = fk.ReferencedTable
	relationship.FromSchema = fk.ReferencedSchema
	relationship.ToTable = table.Name
	relationship.ToSchema = table.Schema
	relationship.ForeignKey = fk.Name

	// Store constraint actions in properties
	if fk.OnDelete != "" {
		relationship.Properties["on_delete"] = fk.OnDelete
	}
	if fk.OnUpdate != "" {
		relationship.Properties["on_update"] = fk.OnUpdate
	}

	table.Relationships[relationshipName] = relationship
}
371
pkg/readers/pgsql/reader_test.go
Normal file
371
pkg/readers/pgsql/reader_test.go
Normal file
@@ -0,0 +1,371 @@
package pgsql

import (
	"os"
	"testing"

	"git.warky.dev/wdevs/relspecgo/pkg/models"
	"git.warky.dev/wdevs/relspecgo/pkg/readers"
)

// getTestConnectionString returns a PostgreSQL connection string from environment
// or skips the test if not available
func getTestConnectionString(t *testing.T) string {
	connStr := os.Getenv("RELSPEC_TEST_PG_CONN")
	if connStr == "" {
		t.Skip("Skipping PostgreSQL reader test: RELSPEC_TEST_PG_CONN environment variable not set")
	}
	return connStr
}

func TestReader_ReadDatabase(t *testing.T) {
	connStr := getTestConnectionString(t)

	options := &readers.ReaderOptions{
		ConnectionString: connStr,
	}

	reader := NewReader(options)
	db, err := reader.ReadDatabase()
	if err != nil {
		t.Fatalf("Failed to read database: %v", err)
	}

	// Verify database properties
	if db.Name == "" {
		t.Error("Database name should not be empty")
	}

	if db.DatabaseType != models.PostgresqlDatabaseType {
		t.Errorf("Expected database type %s, got %s", models.PostgresqlDatabaseType, db.DatabaseType)
	}

	if db.SourceFormat != "pgsql" {
		t.Errorf("Expected source format 'pgsql', got %s", db.SourceFormat)
	}

	// Verify schemas
	if len(db.Schemas) == 0 {
		t.Error("Expected at least one schema, got none")
	}

	// Check that system schemas are excluded
	for _, schema := range db.Schemas {
		if schema.Name == "pg_catalog" || schema.Name == "information_schema" {
			t.Errorf("System schema %s should be excluded", schema.Name)
		}
	}

	t.Logf("Successfully read database '%s' with %d schemas", db.Name, len(db.Schemas))

	// Log schema details
	for _, schema := range db.Schemas {
		t.Logf(" Schema: %s (Tables: %d, Views: %d, Sequences: %d)",
			schema.Name, len(schema.Tables), len(schema.Views), len(schema.Sequences))

		// Verify tables have columns
		for _, table := range schema.Tables {
			if len(table.Columns) == 0 {
				t.Logf(" Warning: Table %s.%s has no columns", schema.Name, table.Name)
			} else {
				t.Logf(" Table: %s.%s (Columns: %d, Constraints: %d, Indexes: %d, Relationships: %d)",
					schema.Name, table.Name, len(table.Columns), len(table.Constraints),
					len(table.Indexes), len(table.Relationships))
			}
		}

		// Verify views have columns and definitions
		for _, view := range schema.Views {
			if view.Definition == "" {
				t.Errorf("View %s.%s should have a definition", schema.Name, view.Name)
			}
			t.Logf(" View: %s.%s (Columns: %d)", schema.Name, view.Name, len(view.Columns))
		}

		// Verify sequences
		for _, seq := range schema.Sequences {
			if seq.IncrementBy == 0 {
				t.Errorf("Sequence %s.%s should have non-zero increment", schema.Name, seq.Name)
			}
			t.Logf(" Sequence: %s.%s (Start: %d, Increment: %d)", schema.Name, seq.Name, seq.StartValue, seq.IncrementBy)
		}
	}
}

func TestReader_ReadSchema(t *testing.T) {
	connStr := getTestConnectionString(t)

	options := &readers.ReaderOptions{
		ConnectionString: connStr,
	}

	reader := NewReader(options)
	schema, err := reader.ReadSchema()
	if err != nil {
		t.Fatalf("Failed to read schema: %v", err)
	}

	if schema.Name == "" {
		t.Error("Schema name should not be empty")
	}

	t.Logf("Successfully read schema '%s' with %d tables, %d views, %d sequences",
		schema.Name, len(schema.Tables), len(schema.Views), len(schema.Sequences))
}

func TestReader_ReadTable(t *testing.T) {
	connStr := getTestConnectionString(t)

	options := &readers.ReaderOptions{
		ConnectionString: connStr,
	}

	reader := NewReader(options)
	table, err := reader.ReadTable()
	if err != nil {
		t.Fatalf("Failed to read table: %v", err)
	}

	if table.Name == "" {
		t.Error("Table name should not be empty")
	}

	if table.Schema == "" {
		t.Error("Table schema should not be empty")
	}

	t.Logf("Successfully read table '%s.%s' with %d columns",
		table.Schema, table.Name, len(table.Columns))
}

func TestReader_ReadDatabase_InvalidConnectionString(t *testing.T) {
	options := &readers.ReaderOptions{
		ConnectionString: "invalid connection string",
	}

	reader := NewReader(options)
	_, err := reader.ReadDatabase()
	if err == nil {
		t.Error("Expected error with invalid connection string, got nil")
	}

	t.Logf("Correctly rejected invalid connection string: %v", err)
}

func TestReader_ReadDatabase_EmptyConnectionString(t *testing.T) {
	options := &readers.ReaderOptions{
		ConnectionString: "",
	}

	reader := NewReader(options)
	_, err := reader.ReadDatabase()
	if err == nil {
		t.Error("Expected error with empty connection string, got nil")
	}

	expectedMsg := "connection string is required"
	if err.Error() != expectedMsg {
		t.Errorf("Expected error message '%s', got '%s'", expectedMsg, err.Error())
	}
}

func TestMapDataType(t *testing.T) {
	reader := &Reader{}

	tests := []struct {
		pgType   string
		udtName  string
		expected string
	}{
		{"integer", "int4", "int"},
		{"bigint", "int8", "int64"},
		{"smallint", "int2", "int16"},
		{"character varying", "varchar", "string"},
		{"text", "text", "string"},
		{"boolean", "bool", "bool"},
		{"timestamp without time zone", "timestamp", "timestamp"},
		{"timestamp with time zone", "timestamptz", "timestamptz"},
		{"json", "json", "json"},
		{"jsonb", "jsonb", "jsonb"},
		{"uuid", "uuid", "uuid"},
		{"numeric", "numeric", "decimal"},
		{"real", "float4", "float32"},
		{"double precision", "float8", "float64"},
		{"date", "date", "date"},
		{"time without time zone", "time", "time"},
		{"bytea", "bytea", "bytea"},
		{"unknown_type", "custom", "custom"}, // Should return UDT name
	}

	for _, tt := range tests {
		t.Run(tt.pgType, func(t *testing.T) {
			result := reader.mapDataType(tt.pgType, tt.udtName)
			if result != tt.expected {
				t.Errorf("mapDataType(%s, %s) = %s, expected %s", tt.pgType, tt.udtName, result, tt.expected)
			}
		})
	}
}

func TestParseIndexDefinition(t *testing.T) {
	reader := &Reader{}

	tests := []struct {
		name        string
		indexName   string
		tableName   string
		schema      string
		indexDef    string
		wantType    string
		wantUnique  bool
		wantColumns int
	}{
		{
			name:        "simple btree index",
			indexName:   "idx_users_email",
			tableName:   "users",
			schema:      "public",
			indexDef:    "CREATE INDEX idx_users_email ON public.users USING btree (email)",
			wantType:    "btree",
			wantUnique:  false,
			wantColumns: 1,
		},
		{
			name:        "unique index",
			indexName:   "idx_users_username",
			tableName:   "users",
			schema:      "public",
			indexDef:    "CREATE UNIQUE INDEX idx_users_username ON public.users USING btree (username)",
			wantType:    "btree",
			wantUnique:  true,
			wantColumns: 1,
		},
		{
			name:        "composite index",
			indexName:   "idx_users_name",
			tableName:   "users",
			schema:      "public",
			indexDef:    "CREATE INDEX idx_users_name ON public.users USING btree (first_name, last_name)",
			wantType:    "btree",
			wantUnique:  false,
			wantColumns: 2,
		},
		{
			name:        "gin index",
			indexName:   "idx_posts_tags",
			tableName:   "posts",
			schema:      "public",
			indexDef:    "CREATE INDEX idx_posts_tags ON public.posts USING gin (tags)",
			wantType:    "gin",
			wantUnique:  false,
			wantColumns: 1,
		},
		{
			name:        "partial index with where clause",
			indexName:   "idx_users_active",
			tableName:   "users",
			schema:      "public",
			indexDef:    "CREATE INDEX idx_users_active ON public.users USING btree (id) WHERE (active = true)",
			wantType:    "btree",
			wantUnique:  false,
			wantColumns: 1,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			index, err := reader.parseIndexDefinition(tt.indexName, tt.tableName, tt.schema, tt.indexDef)
			if err != nil {
				t.Fatalf("parseIndexDefinition() error = %v", err)
			}

			if index.Name != tt.indexName {
				t.Errorf("Name = %s, want %s", index.Name, tt.indexName)
			}

			if index.Type != tt.wantType {
				t.Errorf("Type = %s, want %s", index.Type, tt.wantType)
			}

			if index.Unique != tt.wantUnique {
				t.Errorf("Unique = %v, want %v", index.Unique, tt.wantUnique)
			}

			if len(index.Columns) != tt.wantColumns {
				t.Errorf("Columns count = %d, want %d (columns: %v)", len(index.Columns), tt.wantColumns, index.Columns)
			}
		})
	}
}

func TestDeriveRelationship(t *testing.T) {
	table := models.InitTable("orders", "public")

	fk := models.InitConstraint("fk_orders_user_id", models.ForeignKeyConstraint)
	fk.Schema = "public"
	fk.Table = "orders"
	fk.Columns = []string{"user_id"}
	fk.ReferencedSchema = "public"
	fk.ReferencedTable = "users"
	fk.ReferencedColumns = []string{"id"}
	fk.OnDelete = "CASCADE"
	fk.OnUpdate = "RESTRICT"

	reader := &Reader{}
	reader.deriveRelationship(table, fk)

	if len(table.Relationships) != 1 {
		t.Fatalf("Expected 1 relationship, got %d", len(table.Relationships))
	}

	relName := "orders_to_users"
	rel, exists := table.Relationships[relName]
	if !exists {
		t.Fatalf("Expected relationship '%s', not found", relName)
	}

	if rel.Type != models.OneToMany {
		t.Errorf("Expected relationship type %s, got %s", models.OneToMany, rel.Type)
	}

	if rel.FromTable != "users" {
		t.Errorf("Expected FromTable 'users', got '%s'", rel.FromTable)
	}

	if rel.ToTable != "orders" {
		t.Errorf("Expected ToTable 'orders', got '%s'", rel.ToTable)
	}

	if rel.ForeignKey != "fk_orders_user_id" {
		t.Errorf("Expected ForeignKey 'fk_orders_user_id', got '%s'", rel.ForeignKey)
	}

	if rel.Properties["on_delete"] != "CASCADE" {
		t.Errorf("Expected on_delete 'CASCADE', got '%s'", rel.Properties["on_delete"])
	}

	if rel.Properties["on_update"] != "RESTRICT" {
		t.Errorf("Expected on_update 'RESTRICT', got '%s'", rel.Properties["on_update"])
	}
}

// Benchmark tests
func BenchmarkReader_ReadDatabase(b *testing.B) {
	connStr := os.Getenv("RELSPEC_TEST_PG_CONN")
	if connStr == "" {
		b.Skip("Skipping benchmark: RELSPEC_TEST_PG_CONN environment variable not set")
	}

	options := &readers.ReaderOptions{
		ConnectionString: connStr,
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		reader := NewReader(options)
		_, err := reader.ReadDatabase()
		if err != nil {
			b.Fatalf("Failed to read database: %v", err)
		}
	}
}
131
tests/postgres/README.md
Normal file
131
tests/postgres/README.md
Normal file
@@ -0,0 +1,131 @@
# PostgreSQL Integration Tests

This directory contains Docker-based integration tests for the PostgreSQL reader.

## Quick Start

Run integration tests with Docker:

```bash
# From project root
make docker-test
```

Or use the script directly:

```bash
./tests/postgres/run_tests.sh
```

## Manual Testing

Start the PostgreSQL test database:

```bash
make docker-up
```

Run tests with the running database:

```bash
export RELSPEC_TEST_PG_CONN="postgres://relspec:relspec_test_password@localhost:5433/relspec_test"
go test -v ./pkg/readers/pgsql/
```

Stop the database:

```bash
make docker-down
```
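
Beyond the test suite, the reader can be pointed at the running container directly. A minimal sketch, assuming the container from `make docker-up` is still up (it mirrors `examples/pgsql_reader_example.go`):

```go
package main

import (
	"fmt"
	"log"

	"git.warky.dev/wdevs/relspecgo/pkg/readers"
	"git.warky.dev/wdevs/relspecgo/pkg/readers/pgsql"
)

func main() {
	// Connection string for the Docker test database (note port 5433).
	reader := pgsql.NewReader(&readers.ReaderOptions{
		ConnectionString: "postgres://relspec:relspec_test_password@localhost:5433/relspec_test",
	})

	// ReadDatabase introspects every non-system schema in one call.
	db, err := reader.ReadDatabase()
	if err != nil {
		log.Fatalf("read failed: %v", err)
	}
	for _, schema := range db.Schemas {
		fmt.Printf("%s: %d tables, %d views, %d sequences\n",
			schema.Name, len(schema.Tables), len(schema.Views), len(schema.Sequences))
	}
}
```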
## Test Database Contents

The test database (`relspec_test`) contains:

### Schemas (3)
- **public**: Main schema with user and blog data
- **analytics**: Event tracking and analytics data
- **inventory**: Product inventory management

### Tables (11 total)
- **public.users**: User accounts with various data types
- **public.posts**: Blog posts with foreign keys
- **public.comments**: Nested comments (self-referencing FK)
- **public.categories**: Post categories
- **public.post_categories**: Many-to-many junction table
- **analytics.events**: Event tracking
- **analytics.page_views**: Page view analytics
- **analytics.conversions**: Conversion tracking
- **inventory.products**: Product catalog
- **inventory.warehouses**: Warehouse locations
- **inventory.stock**: Product stock by warehouse

### Views (7 total)
- **public.active_users**: Active users only
- **public.published_posts**: Published posts with author info
- **public.post_stats**: Post statistics
- **analytics.daily_stats**: Daily aggregated statistics
- **analytics.top_pages**: Most viewed pages
- **inventory.available_products**: Available products with stock
- **inventory.low_stock_products**: Products with low inventory

### Sequences (3 named + 6 auto-generated)
- **public.users_id_seq**: Users ID sequence
- **public.posts_id_seq**: Posts ID sequence (with cycle)
- **analytics.events_id_seq**: Events ID sequence
- Plus the auto-generated sequences backing each serial/bigserial column

### Constraint Types
- **Primary Keys**: All tables have PKs
- **Foreign Keys**: Including cross-schema references
- **Unique Constraints**: Username, email, SKU, etc.
- **Check Constraints**: Age limits, status validation, balance checks

### Index Types
- **btree**: Standard B-tree indexes
- **gin**: GIN indexes for JSONB and array columns
- **hash**: Hash indexes for session lookups
- **Unique**: Unique indexes
- **Partial**: Conditional indexes (WHERE clauses)
- **Composite**: Multi-column indexes

A quick `psql` query for inspecting these indexes is sketched below.

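Since index names and definitions live in the standard `pg_indexes` catalog view, the container's indexes can be listed with nothing more than `psql`:

```bash
psql "postgres://relspec:relspec_test_password@localhost:5433/relspec_test" \
  -c "SELECT schemaname, indexname, indexdef
      FROM pg_indexes
      WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
      ORDER BY schemaname, indexname;"
```
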
## Connection Details

- **Host**: localhost
- **Port**: 5433 (to avoid conflicts with local PostgreSQL)
- **Database**: relspec_test
- **User**: relspec
- **Password**: relspec_test_password

Full connection string:

```
postgres://relspec:relspec_test_password@localhost:5433/relspec_test
```

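To sanity-check that the container is accepting connections, you can run the same `pg_isready` probe the compose health check and the test scripts use:

```bash
pg_isready -h localhost -p 5433 -U relspec -d relspec_test
```
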
## Requirements

- Docker with Compose (either the standalone `docker-compose` binary or the `docker compose` plugin); Podman users can use `run_tests_podman.sh` instead
- Go 1.25+

## Makefile Targets

- `make docker-up` - Start the PostgreSQL container
- `make docker-down` - Stop the PostgreSQL container
- `make docker-test` - Run the full test suite with automatic cleanup
- `make docker-test-integration` - Start the database, run the integration tests, then stop the database

A typical development loop with these targets is sketched below.

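The exported variable here is the same one the test suite reads:

```bash
make docker-up
export RELSPEC_TEST_PG_CONN="postgres://relspec:relspec_test_password@localhost:5433/relspec_test"
go test -v ./pkg/readers/pgsql/ -count=1   # iterate as needed
make docker-down
```
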
## Test Coverage

The tests verify:

- Schema introspection (excluding system schemas)
- Table metadata extraction
- View definitions and columns
- Sequence properties and ownership
- Column data types and constraints
- Primary key detection
- Foreign key relationships with referential actions
- Unique and check constraints
- Index types and definitions
- Cross-schema relationships
- Data type mapping
- Error handling

A skeleton for adding a new test in this style follows.

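New integration tests in `pkg/readers/pgsql` can gate on the same environment variable so they skip cleanly when the container is not running. A minimal sketch, assuming the in-package `NewReader`/`ReadDatabase` API used by the benchmarks (the test name, skip message, and `readers` import path are illustrative):

```go
package pgsql

import (
	"os"
	"testing"

	// Placeholder import path; substitute the module's real path.
	"example.invalid/relspec/pkg/readers"
)

// TestReadDatabaseSmoke reads the whole test database and fails on any error.
func TestReadDatabaseSmoke(t *testing.T) {
	connStr := os.Getenv("RELSPEC_TEST_PG_CONN")
	if connStr == "" {
		t.Skip("RELSPEC_TEST_PG_CONN not set; start the database with `make docker-up`")
	}

	reader := NewReader(&readers.ReaderOptions{ConnectionString: connStr})
	db, err := reader.ReadDatabase()
	if err != nil {
		t.Fatalf("ReadDatabase failed: %v", err)
	}
	t.Logf("database model: %+v", db)
}
```
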
510
tests/postgres/init.sql
Normal file
@@ -0,0 +1,510 @@
-- RelSpec Test Database Initialization
-- This script creates a comprehensive test database with multiple schemas,
-- tables, views, sequences, constraints, indexes, and relationships

-- Enable extensions
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";

-- ============================================================================
-- SCHEMA: public (default schema)
-- ============================================================================

-- Sequences
CREATE SEQUENCE public.users_id_seq
    START WITH 1
    INCREMENT BY 1
    NO MINVALUE
    NO MAXVALUE
    CACHE 1;

CREATE SEQUENCE public.posts_id_seq
    START WITH 100
    INCREMENT BY 1
    MINVALUE 100
    MAXVALUE 9999999
    CACHE 10
    CYCLE;

-- Tables
CREATE TABLE public.users (
    id bigint NOT NULL DEFAULT nextval('users_id_seq'::regclass),
    username character varying(50) NOT NULL,
    email character varying(100) NOT NULL,
    first_name character varying(50),
    last_name character varying(50),
    age integer,
    is_active boolean DEFAULT true,
    created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP,
    updated_at timestamp without time zone,
    metadata jsonb,
    profile_data json,
    birth_date date,
    balance numeric(10,2) DEFAULT 0.00,
    rating real,
    score double precision,
    user_uuid uuid DEFAULT uuid_generate_v4(),
    bio text,
    avatar_url text
);

COMMENT ON TABLE public.users IS 'User accounts table';
COMMENT ON COLUMN public.users.id IS 'Primary key';
COMMENT ON COLUMN public.users.username IS 'Unique username';
COMMENT ON COLUMN public.users.email IS 'User email address';

CREATE TABLE public.posts (
    id bigint NOT NULL DEFAULT nextval('posts_id_seq'::regclass),
    user_id bigint NOT NULL,
    title character varying(200) NOT NULL,
    content text,
    status character varying(20) DEFAULT 'draft'::character varying,
    published_at timestamp with time zone,
    view_count integer DEFAULT 0,
    tags text[],
    created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP
);

COMMENT ON TABLE public.posts IS 'Blog posts table';

CREATE TABLE public.comments (
    id bigserial NOT NULL,
    post_id bigint NOT NULL,
    user_id bigint NOT NULL,
    parent_comment_id bigint,
    content text NOT NULL,
    created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP
);

CREATE TABLE public.categories (
    id serial NOT NULL,
    name character varying(100) NOT NULL,
    slug character varying(100) NOT NULL,
    description text
);

CREATE TABLE public.post_categories (
    post_id bigint NOT NULL,
    category_id integer NOT NULL,
    assigned_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP
);

-- Primary Keys
ALTER TABLE ONLY public.users
    ADD CONSTRAINT users_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.posts
    ADD CONSTRAINT posts_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.comments
    ADD CONSTRAINT comments_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.categories
    ADD CONSTRAINT categories_pkey PRIMARY KEY (id);

ALTER TABLE ONLY public.post_categories
    ADD CONSTRAINT post_categories_pkey PRIMARY KEY (post_id, category_id);

-- Unique Constraints
ALTER TABLE ONLY public.users
    ADD CONSTRAINT users_username_key UNIQUE (username);

ALTER TABLE ONLY public.users
    ADD CONSTRAINT users_email_key UNIQUE (email);

ALTER TABLE ONLY public.categories
    ADD CONSTRAINT categories_slug_key UNIQUE (slug);

-- Check Constraints
ALTER TABLE public.users
    ADD CONSTRAINT users_age_check CHECK ((age >= 0 AND age <= 150));

ALTER TABLE public.posts
    ADD CONSTRAINT posts_status_check CHECK (status IN ('draft', 'published', 'archived'));

ALTER TABLE public.users
    ADD CONSTRAINT users_balance_check CHECK (balance >= 0);

-- Foreign Keys
ALTER TABLE ONLY public.posts
    ADD CONSTRAINT fk_posts_user_id
    FOREIGN KEY (user_id)
    REFERENCES public.users(id)
    ON DELETE CASCADE
    ON UPDATE RESTRICT;

ALTER TABLE ONLY public.comments
    ADD CONSTRAINT fk_comments_post_id
    FOREIGN KEY (post_id)
    REFERENCES public.posts(id)
    ON DELETE CASCADE;

ALTER TABLE ONLY public.comments
    ADD CONSTRAINT fk_comments_user_id
    FOREIGN KEY (user_id)
    REFERENCES public.users(id)
    ON DELETE CASCADE;

ALTER TABLE ONLY public.comments
    ADD CONSTRAINT fk_comments_parent
    FOREIGN KEY (parent_comment_id)
    REFERENCES public.comments(id)
    ON DELETE CASCADE;

ALTER TABLE ONLY public.post_categories
    ADD CONSTRAINT fk_post_categories_post_id
    FOREIGN KEY (post_id)
    REFERENCES public.posts(id)
    ON DELETE CASCADE;

ALTER TABLE ONLY public.post_categories
    ADD CONSTRAINT fk_post_categories_category_id
    FOREIGN KEY (category_id)
    REFERENCES public.categories(id)
    ON DELETE CASCADE;

-- Indexes
CREATE INDEX idx_users_email ON public.users USING btree (email);
CREATE INDEX idx_users_created_at ON public.users USING btree (created_at);
CREATE INDEX idx_users_last_name_first_name ON public.users USING btree (last_name, first_name);
CREATE INDEX idx_users_active ON public.users USING btree (id) WHERE (is_active = true);

CREATE INDEX idx_posts_user_id ON public.posts USING btree (user_id);
CREATE INDEX idx_posts_status ON public.posts USING btree (status);
CREATE INDEX idx_posts_published_at ON public.posts USING btree (published_at) WHERE (published_at IS NOT NULL);
CREATE INDEX idx_posts_tags ON public.posts USING gin (tags);

CREATE INDEX idx_comments_post_id ON public.comments USING btree (post_id);
CREATE INDEX idx_comments_user_id ON public.comments USING btree (user_id);
CREATE INDEX idx_comments_parent ON public.comments USING btree (parent_comment_id);

CREATE UNIQUE INDEX idx_categories_slug_unique ON public.categories USING btree (slug);

-- Views
CREATE VIEW public.active_users AS
SELECT id, username, email, first_name, last_name, created_at
FROM public.users
WHERE is_active = true;

COMMENT ON VIEW public.active_users IS 'View of active users only';

CREATE VIEW public.published_posts AS
SELECT
    p.id,
    p.title,
    p.content,
    p.published_at,
    p.view_count,
    u.username as author_username,
    u.email as author_email
FROM public.posts p
JOIN public.users u ON p.user_id = u.id
WHERE p.status = 'published';

COMMENT ON VIEW public.published_posts IS 'Published posts with author information';

CREATE VIEW public.post_stats AS
SELECT
    p.id as post_id,
    p.title,
    COUNT(DISTINCT c.id) as comment_count,
    COUNT(DISTINCT pc.category_id) as category_count
FROM public.posts p
LEFT JOIN public.comments c ON c.post_id = p.id
LEFT JOIN public.post_categories pc ON pc.post_id = p.id
GROUP BY p.id, p.title;

-- Set sequence ownership
ALTER SEQUENCE public.users_id_seq OWNED BY public.users.id;
ALTER SEQUENCE public.posts_id_seq OWNED BY public.posts.id;

-- ============================================================================
-- SCHEMA: analytics (secondary schema for testing multi-schema support)
-- ============================================================================

CREATE SCHEMA analytics;
COMMENT ON SCHEMA analytics IS 'Analytics data schema';

-- Sequences
CREATE SEQUENCE analytics.events_id_seq
    START WITH 1000
    INCREMENT BY 1;

-- Tables
CREATE TABLE analytics.events (
    id bigint NOT NULL DEFAULT nextval('analytics.events_id_seq'::regclass),
    event_type character varying(50) NOT NULL,
    user_id bigint,
    session_id uuid,
    event_data jsonb,
    created_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP
);

COMMENT ON TABLE analytics.events IS 'User event tracking';

CREATE TABLE analytics.page_views (
    id bigserial NOT NULL,
    event_id bigint NOT NULL,
    page_url text NOT NULL,
    referrer text,
    user_agent text,
    ip_address inet,
    duration_seconds integer
);

CREATE TABLE analytics.conversions (
    id serial NOT NULL,
    user_id bigint NOT NULL,
    conversion_type character varying(50) NOT NULL,
    revenue numeric(12,2),
    converted_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP
);

-- Primary Keys
ALTER TABLE ONLY analytics.events
    ADD CONSTRAINT events_pkey PRIMARY KEY (id);

ALTER TABLE ONLY analytics.page_views
    ADD CONSTRAINT page_views_pkey PRIMARY KEY (id);

ALTER TABLE ONLY analytics.conversions
    ADD CONSTRAINT conversions_pkey PRIMARY KEY (id);

-- Foreign Keys (cross-schema references)
ALTER TABLE ONLY analytics.events
    ADD CONSTRAINT fk_events_user_id
    FOREIGN KEY (user_id)
    REFERENCES public.users(id)
    ON DELETE SET NULL;

ALTER TABLE ONLY analytics.page_views
    ADD CONSTRAINT fk_page_views_event_id
    FOREIGN KEY (event_id)
    REFERENCES analytics.events(id)
    ON DELETE CASCADE;

ALTER TABLE ONLY analytics.conversions
    ADD CONSTRAINT fk_conversions_user_id
    FOREIGN KEY (user_id)
    REFERENCES public.users(id)
    ON DELETE CASCADE;

-- Check Constraints
ALTER TABLE analytics.conversions
    ADD CONSTRAINT conversions_revenue_check CHECK (revenue >= 0);

-- Indexes
CREATE INDEX idx_events_user_id ON analytics.events USING btree (user_id);
CREATE INDEX idx_events_created_at ON analytics.events USING btree (created_at);
CREATE INDEX idx_events_event_type ON analytics.events USING btree (event_type);
CREATE INDEX idx_events_data ON analytics.events USING gin (event_data);
CREATE INDEX idx_events_session ON analytics.events USING hash (session_id);

CREATE INDEX idx_page_views_event_id ON analytics.page_views USING btree (event_id);
CREATE INDEX idx_conversions_user_id ON analytics.conversions USING btree (user_id);

-- Views
CREATE VIEW analytics.daily_stats AS
SELECT
    DATE(created_at) as date,
    COUNT(*) as total_events,
    COUNT(DISTINCT user_id) as unique_users,
    COUNT(DISTINCT session_id) as unique_sessions
FROM analytics.events
GROUP BY DATE(created_at);

CREATE VIEW analytics.top_pages AS
SELECT
    pv.page_url,
    COUNT(*) as view_count,
    AVG(pv.duration_seconds) as avg_duration
FROM analytics.page_views pv
GROUP BY pv.page_url
ORDER BY view_count DESC
LIMIT 100;

-- Set sequence ownership
ALTER SEQUENCE analytics.events_id_seq OWNED BY analytics.events.id;

-- ============================================================================
-- SCHEMA: inventory (third schema for comprehensive testing)
-- ============================================================================

CREATE SCHEMA inventory;

CREATE TABLE inventory.products (
    id serial NOT NULL,
    sku character varying(50) NOT NULL,
    name character varying(200) NOT NULL,
    description text,
    price numeric(10,2) NOT NULL,
    stock_quantity integer DEFAULT 0,
    is_available boolean DEFAULT true
);

CREATE TABLE inventory.warehouses (
    id serial NOT NULL,
    name character varying(100) NOT NULL,
    location character varying(200),
    capacity integer
);

CREATE TABLE inventory.stock (
    product_id integer NOT NULL,
    warehouse_id integer NOT NULL,
    quantity integer DEFAULT 0,
    last_updated timestamp without time zone DEFAULT CURRENT_TIMESTAMP
);

-- Primary Keys
ALTER TABLE ONLY inventory.products
    ADD CONSTRAINT products_pkey PRIMARY KEY (id);

ALTER TABLE ONLY inventory.warehouses
    ADD CONSTRAINT warehouses_pkey PRIMARY KEY (id);

ALTER TABLE ONLY inventory.stock
    ADD CONSTRAINT stock_pkey PRIMARY KEY (product_id, warehouse_id);

-- Unique Constraints
ALTER TABLE ONLY inventory.products
    ADD CONSTRAINT products_sku_key UNIQUE (sku);

-- Check Constraints
ALTER TABLE inventory.products
    ADD CONSTRAINT products_price_check CHECK (price >= 0);

ALTER TABLE inventory.products
    ADD CONSTRAINT products_stock_check CHECK (stock_quantity >= 0);

ALTER TABLE inventory.stock
    ADD CONSTRAINT stock_quantity_check CHECK (quantity >= 0);

-- Foreign Keys
ALTER TABLE ONLY inventory.stock
    ADD CONSTRAINT fk_stock_product_id
    FOREIGN KEY (product_id)
    REFERENCES inventory.products(id)
    ON DELETE CASCADE;

ALTER TABLE ONLY inventory.stock
    ADD CONSTRAINT fk_stock_warehouse_id
    FOREIGN KEY (warehouse_id)
    REFERENCES inventory.warehouses(id)
    ON DELETE CASCADE;

-- Indexes
CREATE UNIQUE INDEX idx_products_sku ON inventory.products USING btree (sku);
CREATE INDEX idx_products_available ON inventory.products USING btree (id) WHERE (is_available = true);
CREATE INDEX idx_stock_product_id ON inventory.stock USING btree (product_id);
CREATE INDEX idx_stock_warehouse_id ON inventory.stock USING btree (warehouse_id);

-- Views
CREATE VIEW inventory.available_products AS
SELECT id, sku, name, price, stock_quantity
FROM inventory.products
WHERE is_available = true AND stock_quantity > 0;

CREATE VIEW inventory.low_stock_products AS
SELECT p.id, p.sku, p.name, p.stock_quantity
FROM inventory.products p
WHERE p.stock_quantity < 10 AND p.is_available = true;

-- ============================================================================
-- Insert Sample Data
-- ============================================================================

-- Users
INSERT INTO public.users (username, email, first_name, last_name, age, is_active, balance, bio) VALUES
('john_doe', 'john@example.com', 'John', 'Doe', 30, true, 100.50, 'Software developer'),
('jane_smith', 'jane@example.com', 'Jane', 'Smith', 25, true, 250.00, 'Designer'),
('bob_jones', 'bob@example.com', 'Bob', 'Jones', 35, false, 50.00, 'Manager'),
('alice_wilson', 'alice@example.com', 'Alice', 'Wilson', 28, true, 500.75, 'Data scientist');

-- Categories
INSERT INTO public.categories (name, slug, description) VALUES
('Technology', 'technology', 'Tech related posts'),
('Design', 'design', 'Design and UX posts'),
('Business', 'business', 'Business and strategy');

-- Posts (with explicit IDs)
INSERT INTO public.posts (id, user_id, title, content, status, view_count, tags) VALUES
(1, 1, 'Getting Started with PostgreSQL', 'A comprehensive guide...', 'published', 150, ARRAY['database', 'postgresql', 'tutorial']),
(2, 1, 'Advanced SQL Queries', 'Deep dive into complex queries...', 'published', 200, ARRAY['sql', 'advanced']),
(3, 2, 'UI Design Principles', 'Best practices for UI design...', 'published', 300, ARRAY['design', 'ui', 'ux']),
(4, 4, 'Data Analysis with Python', 'Introduction to pandas...', 'draft', 0, ARRAY['python', 'data']);

-- Reset sequence to avoid conflicts
SELECT setval('public.posts_id_seq', 100);

-- Post Categories
INSERT INTO public.post_categories (post_id, category_id) VALUES
(1, 1), (2, 1), (3, 2), (4, 1);

-- Comments
INSERT INTO public.comments (post_id, user_id, content) VALUES
(1, 2, 'Great article!'),
(1, 4, 'Very helpful, thanks!'),
(2, 3, 'Can you elaborate on joins?');

-- Analytics Events
INSERT INTO analytics.events (event_type, user_id, event_data) VALUES
('page_view', 1, '{"page": "/posts/1"}'),
('page_view', 2, '{"page": "/posts/1"}'),
('click', 1, '{"element": "share_button"}'),
('conversion', 4, '{"type": "signup"}');

-- Products
INSERT INTO inventory.products (sku, name, description, price, stock_quantity) VALUES
('PROD-001', 'Laptop', 'High-performance laptop', 999.99, 50),
('PROD-002', 'Mouse', 'Wireless mouse', 29.99, 200),
('PROD-003', 'Keyboard', 'Mechanical keyboard', 79.99, 5);

-- Warehouses
INSERT INTO inventory.warehouses (name, location, capacity) VALUES
('Main Warehouse', 'New York', 10000),
('West Coast Warehouse', 'San Francisco', 5000);

-- Stock
INSERT INTO inventory.stock (product_id, warehouse_id, quantity) VALUES
(1, 1, 30), (1, 2, 20),
(2, 1, 150), (2, 2, 50),
(3, 1, 5);

-- Grant permissions (for testing purposes)
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO relspec;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO relspec;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA analytics TO relspec;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA analytics TO relspec;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA inventory TO relspec;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA inventory TO relspec;

-- Analyze tables for query optimization
ANALYZE;

-- Summary
DO $$
DECLARE
    schema_count integer;
    table_count integer;
    view_count integer;
    sequence_count integer;
BEGIN
    SELECT COUNT(*) INTO schema_count FROM pg_namespace
    WHERE nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
      AND nspname NOT LIKE 'pg_%';

    SELECT COUNT(*) INTO table_count FROM pg_tables
    WHERE schemaname NOT IN ('pg_catalog', 'information_schema');

    SELECT COUNT(*) INTO view_count FROM pg_views
    WHERE schemaname NOT IN ('pg_catalog', 'information_schema');

    SELECT COUNT(*) INTO sequence_count FROM pg_sequences
    WHERE schemaname NOT IN ('pg_catalog', 'information_schema');

    RAISE NOTICE 'Database initialization complete!';
    RAISE NOTICE 'Schemas: %', schema_count;
    RAISE NOTICE 'Tables: %', table_count;
    RAISE NOTICE 'Views: %', view_count;
    RAISE NOTICE 'Sequences: %', sequence_count;
END $$;

95
tests/postgres/run_tests.sh
Executable file
@@ -0,0 +1,95 @@
#!/bin/bash
set -e

# RelSpec PostgreSQL Integration Tests Runner
# This script starts a PostgreSQL Docker container and runs integration tests

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

echo "=== RelSpec PostgreSQL Integration Tests ==="
echo ""

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Check that either docker-compose or docker is available
if ! command -v docker-compose &> /dev/null && ! command -v docker &> /dev/null; then
    echo -e "${RED}Error: docker-compose or docker is not installed${NC}"
    exit 1
fi

# Determine docker compose command
if command -v docker-compose &> /dev/null; then
    DOCKER_COMPOSE="docker-compose"
else
    DOCKER_COMPOSE="docker compose"
fi

# Change to project root
cd "$PROJECT_ROOT"

# Function to cleanup
cleanup() {
    echo -e "\n${YELLOW}Cleaning up...${NC}"
    $DOCKER_COMPOSE down
}

# Trap exit to cleanup
trap cleanup EXIT

# Start PostgreSQL container
echo -e "${YELLOW}Starting PostgreSQL container...${NC}"
$DOCKER_COMPOSE up -d postgres

# Wait for PostgreSQL to be ready
echo -e "${YELLOW}Waiting for PostgreSQL to be ready...${NC}"
max_attempts=30
attempt=0

while [ $attempt -lt $max_attempts ]; do
    if $DOCKER_COMPOSE exec -T postgres pg_isready -U relspec -d relspec_test &> /dev/null; then
        echo -e "${GREEN}PostgreSQL is ready!${NC}"
        break
    fi
    attempt=$((attempt + 1))
    echo -n "."
    sleep 1
done

if [ $attempt -eq $max_attempts ]; then
    echo -e "\n${RED}Error: PostgreSQL failed to start${NC}"
    exit 1
fi

# Give it a couple more seconds to finish initializing
sleep 2

# Show database stats
echo -e "\n${YELLOW}Database Information:${NC}"
$DOCKER_COMPOSE exec -T postgres psql -U relspec -d relspec_test -c "
SELECT
    (SELECT COUNT(*) FROM pg_namespace WHERE nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast') AND nspname NOT LIKE 'pg_%') as schemas,
    (SELECT COUNT(*) FROM pg_tables WHERE schemaname NOT IN ('pg_catalog', 'information_schema')) as tables,
    (SELECT COUNT(*) FROM pg_views WHERE schemaname NOT IN ('pg_catalog', 'information_schema')) as views,
    (SELECT COUNT(*) FROM pg_sequences WHERE schemaname NOT IN ('pg_catalog', 'information_schema')) as sequences;
"

# Set environment variable for tests
export RELSPEC_TEST_PG_CONN="postgres://relspec:relspec_test_password@localhost:5433/relspec_test"

echo -e "\n${YELLOW}Running PostgreSQL reader tests...${NC}"
echo "Connection string: $RELSPEC_TEST_PG_CONN"
echo ""

# Run the tests
if go test -v ./pkg/readers/pgsql/ -count=1; then
    echo -e "\n${GREEN}✓ All tests passed!${NC}"
    exit 0
else
    echo -e "\n${RED}✗ Tests failed${NC}"
    exit 1
fi

110
tests/postgres/run_tests_podman.sh
Executable file
@@ -0,0 +1,110 @@
#!/bin/bash
set -e

# RelSpec PostgreSQL Integration Tests Runner (Podman version)
# This script starts a PostgreSQL Podman container and runs integration tests

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

echo "=== RelSpec PostgreSQL Integration Tests (Podman) ==="
echo ""

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Container configuration
CONTAINER_NAME="relspec-test-postgres"
POSTGRES_USER="relspec"
POSTGRES_PASSWORD="relspec_test_password"
POSTGRES_DB="relspec_test"
POSTGRES_PORT="5433"

# Check if podman is available
if ! command -v podman &> /dev/null; then
    echo -e "${RED}Error: podman is not installed${NC}"
    exit 1
fi

# Change to project root
cd "$PROJECT_ROOT"

# Function to cleanup
cleanup() {
    echo -e "\n${YELLOW}Cleaning up...${NC}"
    podman stop "$CONTAINER_NAME" 2>/dev/null || true
    podman rm "$CONTAINER_NAME" 2>/dev/null || true
}

# Trap exit to cleanup
trap cleanup EXIT

# Stop and remove existing container if it exists
echo -e "${YELLOW}Cleaning up any existing containers...${NC}"
podman stop "$CONTAINER_NAME" 2>/dev/null || true
podman rm "$CONTAINER_NAME" 2>/dev/null || true

# Start PostgreSQL container
echo -e "${YELLOW}Starting PostgreSQL container...${NC}"
podman run -d \
    --name "$CONTAINER_NAME" \
    -e POSTGRES_USER="$POSTGRES_USER" \
    -e POSTGRES_PASSWORD="$POSTGRES_PASSWORD" \
    -e POSTGRES_DB="$POSTGRES_DB" \
    -p "$POSTGRES_PORT:5432" \
    -v "$SCRIPT_DIR/init.sql:/docker-entrypoint-initdb.d/init.sql:Z" \
    docker.io/library/postgres:16-alpine

# Wait for PostgreSQL to be ready
echo -e "${YELLOW}Waiting for PostgreSQL to be ready...${NC}"
max_attempts=30
attempt=0

while [ $attempt -lt $max_attempts ]; do
    if podman exec "$CONTAINER_NAME" pg_isready -U "$POSTGRES_USER" -d "$POSTGRES_DB" &> /dev/null; then
        echo -e "${GREEN}PostgreSQL is ready!${NC}"
        break
    fi
    attempt=$((attempt + 1))
    echo -n "."
    sleep 1
done

if [ $attempt -eq $max_attempts ]; then
    echo -e "\n${RED}Error: PostgreSQL failed to start${NC}"
    podman logs "$CONTAINER_NAME"
    exit 1
fi

# Give it a couple more seconds to finish initializing
sleep 2

# Show database stats
echo -e "\n${YELLOW}Database Information:${NC}"
podman exec "$CONTAINER_NAME" psql -U "$POSTGRES_USER" -d "$POSTGRES_DB" -c "
SELECT
    (SELECT COUNT(*) FROM pg_namespace WHERE nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast') AND nspname NOT LIKE 'pg_%') as schemas,
    (SELECT COUNT(*) FROM pg_tables WHERE schemaname NOT IN ('pg_catalog', 'information_schema')) as tables,
    (SELECT COUNT(*) FROM pg_views WHERE schemaname NOT IN ('pg_catalog', 'information_schema')) as views,
    (SELECT COUNT(*) FROM pg_sequences WHERE schemaname NOT IN ('pg_catalog', 'information_schema')) as sequences;
"

# Set environment variable for tests
export RELSPEC_TEST_PG_CONN="postgres://$POSTGRES_USER:$POSTGRES_PASSWORD@localhost:$POSTGRES_PORT/$POSTGRES_DB"

echo -e "\n${YELLOW}Running PostgreSQL reader tests...${NC}"
echo "Connection string: $RELSPEC_TEST_PG_CONN"
echo ""

# Run the tests
cd "$PROJECT_ROOT"
if go test -v ./pkg/readers/pgsql/ -count=1; then
    echo -e "\n${GREEN}✓ All tests passed!${NC}"
    exit 0
else
    echo -e "\n${RED}✗ Tests failed${NC}"
    exit 1
fi