Added more examples and pgsql reader

2025-12-17 10:08:50 +02:00
parent 5d60bc3b2c
commit db6cd21511
22 changed files with 6741 additions and 1 deletion

View File

@@ -0,0 +1,715 @@
----select dropall('f_audit_build');
create or replace function meta.f_audit_build(
p_mode integer = 0,
p_schema text default ''
)
returns table(
scriptpriority integer
,scripttype citext
,objectname citext
,scriptcode citext
,schemaname citext
)
language plpgsql volatile
as
$$
---p_mode=
--0:all
--1:only functions
--2:only creates
--3:only drops
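--e.g. p_mode = 3 emits only the 'drop audit trigger' scripts (priority 340),
--while p_mode = 0 emits drops, functions and triggers in a single pass.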
declare
--Error Handling--
m_funcname text = 'f_audit_build';
m_errmsg text;
m_errcontext text;
m_errdetail text;
m_errhint text;
m_errstate text;
m_retval integer;
--Error Handling--
m_sql citext;
m_sql2 citext;
m_auditcol citext;
m_dkeyfield citext;
m_pkfield citext;
m_skipcoreschema citext[];
r_record record;
r_recsub record;
begin
--columntype:
-- 0=Normal Audited
--, 1=Primary Key
--, 2=Primary Key Audited
--, 3=UserColumn
--, 4=Normal Encrypted Audited
--, 5 = Parent Table Primary Key for lookup (Must give Foreign Table)
--, 6 = Parent Table Primary Key for delete (Must give Foreign Table)
--, 9=View only, used for custom audit, e.g. pplan
--specialtype:
-- 0,null = Leave alone.
-- 1=Convert int date to sql date.
-- 2=Convert int time to sql time.
-- 3=Status Field. Note: when SpecialType is 3, the ForeignTable column must hold the name of the search sent to F_GetDescription_Code
-- 4=ForeignTable
-- 5=ForeignTable, tableprefix
-- 6 = Blob Column, will compare with size only
-- 7 = Blob Column, will compare with hash only
-- 8 = Hide
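--Illustrative (hypothetical column registration): a status column audited via a description
--lookup would come back from f_getauditinfo as columntype = 0, specialtype = 3 with
--foreigntable holding the search name for F_GetDescription_Code, while a bytea column
--compared by hash would use columntype = 0, specialtype = 7.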
m_skipcoreschema = array[]::citext[];
SET LOCAL statement_timeout = '90s';
drop table if exists tmp_f_audittables;
CREATE TEMP TABLE tmp_f_audittables(
tablename citext
,schemaname citext
,tableprefix citext
,columnname citext
,columndesc citext
,columntype integer
,datatype citext
,specialtype integer
,foreigntable citext
,foreigncolumn citext
,foreignpk citext
,addversion integer
,savetoschema citext
,PRIMARY KEY(columnname, schemaname,tablename, columntype)
);
--CREATE INDEX idx_tmp_f_audittables ON tmp_f_audittables (tablename, columnname);
if exists (
select r.routine_name
from information_schema.routines r
where r.routine_name::citext = 'f_getauditinfo'
and r.specific_schema = 'public'
) and exists (
select 1
from information_schema.columns c
where c.table_name = 't_adproclientcreditorl1'
and c.table_schema = 'public'
)
then
m_skipcoreschema = array_append(m_skipcoreschema,'public'::citext);
insert into tmp_f_audittables(tablename
,schemaname
,tableprefix
,columnname
,columndesc
,columntype
,datatype
,specialtype
,foreigntable
,foreigncolumn
,foreignpk
,savetoschema
)
select distinct on (r.columnname, r.tablename, r.columntype)
r.tablename
,'public' as schemaname
,r.tableprefix
,r.columnname
,r.columndescription
,r.columntype
,r.datatype
,r.specialtype
,r.foreigntable
,r.foreigncolumn
,r.foreignpk
,'public' as savetoschema
from meta.migration_table t2
inner join public.f_getauditinfo(t2.tablename) r on t2.ismodel
and t2.tablename = r.tablename
and t2.schemaname = 'public'
;
end if;
if exists (
select r.routine_name
from information_schema.routines r
where r.routine_name::citext = 'f_getauditinfo'
and r.specific_schema = 'meta'
)
then
insert into tmp_f_audittables(tablename
,schemaname
,tableprefix
,columnname
,columndesc
,columntype
,datatype
,specialtype
,foreigntable
,foreigncolumn
,foreignpk
,savetoschema
)
select distinct on (r.columnname, r.tablename, r.columntype)
r.tablename
,r.schemaname
,r.tableprefix
,r.columnname
,r.columndescription
,r.columntype
,r.datatype
,r.specialtype
,r.foreigntable
,r.foreigncolumn
,r.foreignpk
,case when t2.schemaname = 'public' then 'public' else 'core' end as savetoschema
from meta.migration_table t2
inner join meta.f_getauditinfo(format('%s.%s',t2.schemaname,t2.tablename)) r on t2.ismodel
and t2.tablename = r.tablename
and t2.schemaname = r.schemaname
and coalesce(r.schemaname,'') not in ('meta','')
where coalesce(r.schemaname,'') <> all(m_skipcoreschema)
;
delete from tmp_f_audittables a
where a.tablename in (
select distinct b.tablename
from tmp_f_audittables b
where b.columntype = 9
);
if exists (
select r.routine_name
from information_schema.routines r
where r.routine_name::citext = 'f_getauditversion'
and r.specific_schema = 'meta'
)
then
update tmp_f_audittables at
set addversion = av.mode
from (
select r.tablename
,r.columnname
,r.mode
from meta.f_getauditversion('') r
) av
where at.tablename::citext = av.tablename::citext
and at.columnname::citext = av.columnname::citext
;
end if;
end if;
delete from tmp_f_audittables t
where t.tablename in (
'version', 'upgrades', 'upgradescript', 'eventlog', 'event_notify', 'atevent',
'atdetail', 'procused', 'lastprocrun', 'intface_transmission', 'setting','programmodule'
,'_loginsession','event_notify','bt_enums','authsession'
)
or t.tablename in (select t.tablename from meta.f_upgrade_table() t );
for r_record in (
select distinct on (a.tablename,a.schemaname ,a.tableprefix)
nv(a.tablename)::citext as tablename
,nv(a.schemaname)::citext as schemaname
,nv(a.tableprefix)::citext as tableprefix
,COALESCE((select 1
from tmp_f_audittables a2
where a2.tablename = a.tablename
and a2.addversion = 2
limit 1
),0) as addversion
,nv(a.savetoschema,'core') as savetoschema
from tmp_f_audittables a
inner join information_schema.tables t on lower(t.table_name) = lower(a.tablename)
and lower(t.table_schema) = lower(a.schemaname)
)
loop
m_sql = '';
m_sql2 = '';
m_auditcol = 'if ( ';
m_dkeyfield = '';
m_pkfield = '';
--raise notice 'Tbl: %', r_record.tableprefix;
for r_recsub in (
select nv(a.columnname) as columnname
,nv(a.columntype) as columntype
,nv(a.datatype) as datatype
,nv(a.specialtype) as specialtype
,nv(a.foreigntable) as foreigntable
,nv(a.foreigncolumn) as foreigncolumn
,nv(a.foreignpk) as foreignpk
,nv(a.addversion) as addversion
,row_number() over (order by a.tablename, a.columnname) rownr
,r_record.savetoschema as savetoschema
from tmp_f_audittables a
where a.tablename = r_record.tablename
and a.schemaname = r_record.schemaname
and exists (
select 1
from information_schema.columns c
where lower(c.table_name) = lower(a.tablename)
and lower(c.table_schema) = lower(a.schemaname)
and lower(c.column_name) = lower(a.columnname)
)
)
loop
--raise notice 'Col: %' ,r_recsub.columnname;
if r_recsub.columntype in (0,2,4)
then
if r_recsub.datatype = 'jsonb'
then
m_sql = m_sql || chr(13) || chr(10) || ' '
|| 'if (old.'|| r_recsub.columnname ||' IS DISTINCT FROM new.'|| r_recsub.columnname || ') then'
|| chr(13) || chr(10) || ' '
|| case when r_recsub.savetoschema = 'core'
then 'insert into core.atdetail(rid_atevent, datacolumn, changedfrom, changedto)'
else 'insert into public.atdetail(rid_parent, datacolumn, changedfrom, changedto)'
end
|| chr(13) || chr(10)
|| $SS$
select m_atevent, k
, case when r.jsonvalue->k->>'action' = 'insert' then null else r.jsonvalue->k->>'oldvalue' end as changedfrom
, case when r.jsonvalue->k->>'action' = 'delete' then null else r.jsonvalue->k->>'value' end as changedto
from f_json_keydiff(to_jsonb(new.$SS$|| r_recsub.columnname || $SS$), to_jsonb(old.$SS$|| r_recsub.columnname || $SS$),'$SS$|| r_recsub.columnname || $SS$') r(jsonvalue)
cross join jsonb_object_keys(r.jsonvalue) k
;
$SS$
|| chr(13) || chr(10) || ' end if;'
|| chr(13) || chr(10) ;
else
--detail for updates
m_sql = m_sql || chr(13) || chr(10) || ' '
|| 'if (old.'|| r_recsub.columnname ||' IS DISTINCT FROM new.'|| r_recsub.columnname || ') then'
|| chr(13) || chr(10) || ' '
|| case when r_recsub.savetoschema = 'core'
then 'insert into core.atdetail(rid_atevent, datacolumn, changedfrom, changedto)'
else 'insert into public.atdetail(rid_parent, datacolumn, changedfrom, changedto)'
end
|| chr(13) || chr(10) || ' '
|| 'values (m_atevent, '''|| r_recsub.columnname ||''', substr('
|| case --OLD
--TIME
when r_recsub.specialtype = 2 then 'f_inttotm(old.'|| r_recsub.columnname || ')::text'
--desc code lookup
when r_recsub.specialtype = 3 and r_recsub.columntype = 0 and COALESCE(r_recsub.foreigntable,'') <> ''
then 'f_getdesccode(''' || r_recsub.foreigntable || ''', old.'|| r_recsub.columnname || '::text)::text'
--foreign table sub select
when r_recsub.specialtype in (4,9) and r_recsub.columntype = 0 and COALESCE(r_recsub.foreigntable,'') <> ''
then case when r_recsub.foreigncolumn ilike '(%' then '(select ' else '(select ft.' end --Detect sub select.
|| r_recsub.foreigncolumn || ' from ' || r_recsub.foreigntable || ' ft where ft.'|| r_recsub.foreignpk
||' = old.'|| r_recsub.columnname || ' limit 1)::text'
--blobs, size
when r_recsub.specialtype = 6 and r_recsub.columntype in (0,3,4) and r_recsub.datatype = 'bytea'
then 'octet_length(old.' || r_recsub.columnname ||')::text'
--blobs, md5
when r_recsub.specialtype = 7 and r_recsub.columntype in (0,3,4) and r_recsub.datatype = 'bytea'
then '( select h.p_hash from pl_hashbytes(old.' || r_recsub.columnname ||') h limit 1 )::text'
--Hide (All)
when r_recsub.specialtype = 8 or r_recsub.columntype = 4
then '''****************''::text'
--default
else ' old.'|| r_recsub.columnname || '::text'
end
||',1,1000) , substr('
|| case --NEW
--TIME
when r_recsub.specialtype = 2 then 'f_inttotm(new.'|| r_recsub.columnname || ')::text'
--desc code lookup
when r_recsub.specialtype = 3 and r_recsub.columntype = 0 and COALESCE(r_recsub.foreigntable,'') <> ''
then 'f_getdesccode(''' || r_recsub.foreigntable || ''', new.'|| r_recsub.columnname || '::text)::text'
--foreign table sub select
when r_recsub.specialtype = 4 and r_recsub.columntype = 0 and COALESCE(r_recsub.foreigntable,'') <> ''
then '(select ft.' || r_recsub.foreigncolumn || ' from ' || r_recsub.foreigntable || ' ft where ft.'|| r_recsub.foreignpk
||' = new.'|| r_recsub.columnname || ' limit 1)::text'
--foreign table sub select with table prefix
when r_recsub.specialtype = 5 and COALESCE(r_recsub.foreigntable,'') <> ''
then '(select ft.' || r_recsub.foreigncolumn || ' from ' || r_recsub.foreigntable || ' ft where ft.'|| r_recsub.foreignpk
||' = new.'|| r_recsub.columnname || ' limit 1)::text'
--blobs, size
when r_recsub.specialtype = 6 and r_recsub.columntype in (0,3,4) and r_recsub.datatype = 'bytea'
then 'octet_length(new.' || r_recsub.columnname ||')::text'
--blobs, md5
when r_recsub.specialtype = 7 and r_recsub.columntype in (0,3,4) and r_recsub.datatype = 'bytea'
then '( select h.p_hash from pl_hashbytes(new.' || r_recsub.columnname ||') h limit 1 )::text'
--Hide (All)
when r_recsub.specialtype = 8 or r_recsub.columntype = 4
then '''****************''::text'
--default
else ' new.'|| r_recsub.columnname || '::text'
end
||',1,1000));' || chr(13) || chr(10) || ' '
|| case when r_recsub.addversion = 1 then E'\n '
|| case when r_recsub.savetoschema = 'public'
then E' insert into public.atdetail(rid_parent, datacolumn, changedfrom, changedto)
select m_atevent, ''version'', null,(select version from version limit 1) \n
where not exists (
select 1
from public.atdetail ad
where ad.rid_parent = m_atevent
and ad.datacolumn = ''version''
) and not (changedfrom is null and changedto is null)
;\n
'
else E' insert into core.atdetail(rid_atevent, datacolumn, changedfrom, changedto)
select m_atevent, ''version'', null,(select version from version limit 1) \n
where not exists (
select 1
from core.atdetail ad
where ad.rid_atevent = m_atevent
and ad.datacolumn = ''version''
) and not (changedfrom is null and changedto is null)
;\n'
end || E' \n'
else ''
end
|| chr(13) || chr(10) || ' end if;'
|| chr(13) || chr(10) ;
end if;
--detail for deletes
m_sql2 = m_sql2 || chr(13) || chr(10) || ' '
|| case when r_recsub.savetoschema = 'public'
then 'insert into public.atdetail(rid_parent, datacolumn, changedfrom, changedto)'
else 'insert into core.atdetail(rid_atevent, datacolumn, changedfrom, changedto)'
end
|| chr(13) || chr(10) || ' '
|| 'values (m_atevent, '''|| r_recsub.columnname ||''', ' || case
when r_recsub.datatype ilike '%bytea%'
then '( select h.p_hash from pl_hashbytes(old.' || r_recsub.columnname ||') h limit 1 )::text '
when r_recsub.datatype ilike '%int%' or r_recsub.datatype ilike '%numeric%' or r_recsub.datatype ilike '%real%'
then ' old.'|| r_recsub.columnname ||'::text '
else ' substr(old.'|| r_recsub.columnname ||'::text,1,1000) '
end || ', null);'
|| chr(13) || chr(10) ;
m_auditcol = m_auditcol || 'old.'|| r_recsub.columnname ||' IS DISTINCT FROM new.'|| r_recsub.columnname
|| case when r_recsub.rownr % 3 = 0 then chr(13) || chr(10)
else ''
end || ' or ';
end if;
if (r_recsub.columntype in (1,2))
then
m_pkfield = r_recsub.columnname;
end if;
if r_recsub.columntype = 6
then
m_dkeyfield = r_recsub.columnname;
end if;
end loop;
m_auditcol = m_auditcol || ' 1 = 0 ) then';
if nv(p_mode) in (0,3)
then
return query
select r.scriptpriority::integer
,r.scripttype::citext
,r.objectname::citext
,r.scriptcode::citext
,r_record.schemaname::citext
from (
select
340 as scriptpriority
,'drop audit trigger' as scripttype
,'t_audit_'||r_record.tablename as objectname
,'do $C$
begin
SET LOCAL statement_timeout = ''10s'';
DROP TRIGGER IF EXISTS t_audit_' || r_record.tablename
||E'\n ON ' || r_record.schemaname || '.' || r_record.tablename ||';
end;
$C$
' as scriptcode
) r
;
end if;
if nv(m_pkfield) = ''
then
raise notice 'No Primary Key for %. Please fix this in f_getauditinfo.',r_record.tablename;
--perform log_event(m_funcname,format(' as No Primary Key for %s %s. Please fix this in f_getauditinfo.' ,r_record.tablename, r_record.tableprefix),bt_enum('eventlog','error'));
continue;
end if;
if nv(p_mode) in (0,1)
then
return query
select r.scriptpriority::integer
,r.scripttype::citext
,r.objectname::citext
,r.scriptcode::citext
,r_record.schemaname::citext
from (
select 345 as scriptpriority
,'create audit function ' as scripttype
,r_record.schemaname || '.' || 'ft_audit_'||rtrim(r_record.tablename) as objectname
,--------------------------- String ends at $QUOTED$ with comments.
$QUOTED$
CREATE OR REPLACE FUNCTION $QUOTED$ || r_record.schemaname || $QUOTED$.ft_audit_$QUOTED$ || rtrim(r_record.tablename) || $QUOTED$ (
)
RETURNS trigger AS
$body$
DECLARE
--Error Handling--
m_funcname text = 'ft_audit_$QUOTED$ || rtrim(r_record.tablename) || $QUOTED$';
m_errmsg text;
m_errcontext text;
m_errdetail text;
m_errhint text;
m_errstate text;
m_retval integer;
--Error Handling--
r_adtrec record;
m_query text;
m_user citext;
m_atevent integer;
BEGIN
if core.not_triggerable(tg_name, tg_table_schema, tg_table_name,tg_op)
then
return null;
end if;
if TG_TABLE_NAME::citext <> $QUOTED$ || quote_literal(r_record.tablename) || $QUOTED$::citext
then
IF (TG_OP = 'DELETE') THEN
RETURN OLD;
ELSIF (TG_OP = 'UPDATE') THEN
RETURN NEW;
ELSIF (TG_OP = 'INSERT') THEN
RETURN NEW;
END IF;
end if;
m_user = f_getuser();
if m_user::citext in ('noaudit', 'importuser')
or meta.f_audit_check(TG_TABLE_NAME::text, TG_OP::text, m_user::citext, row_to_json(NEW), row_to_json(OLD))
then
IF (TG_OP = 'DELETE') THEN
RETURN OLD;
ELSIF (TG_OP = 'UPDATE') THEN
RETURN NEW;
ELSIF (TG_OP = 'INSERT') THEN
RETURN NEW;
END IF;
end if;
$QUOTED$ ||
-------------------core--------------
case when r_record.savetoschema = 'core' then $QUOTED$
if TG_OP = 'INSERT'
then
with rows_ins as (
insert into core.atevent (tablename, rid_parent, changeuser, changedate,changetime,actionx)
values('$QUOTED$ || r_record.tablename || $QUOTED$', new.$QUOTED$ || m_pkfield || $QUOTED$, m_user, clock_timestamp()::date, clock_timestamp()::time, 1)
returning rid_atevent
)
select rid_atevent
from rows_ins
into m_atevent;
$QUOTED$ || case
when r_record.addversion = 1 then E'
insert into core.atdetail(rid_atevent, datacolumn, changedfrom, changedto) \n
select m_atevent, $Q2$version$Q2$, null,(select version from version limit 1) \n
where not exists (select 1 from core.atdetail ad where ad.rid_atevent = m_atevent and ad.datacolumn = $Q2$version$Q2$);
' else '' end ||
$QUOTED$
elsif TG_OP = 'UPDATE'
then
$QUOTED$ || m_auditcol || $QUOTED$
with rows_ins as (
insert into core.atevent (tablename, rid_parent, changeuser, changedate,changetime,actionx)
values('$QUOTED$ || r_record.tablename || $QUOTED$', new.$QUOTED$ || m_pkfield || $QUOTED$, m_user, clock_timestamp()::date, clock_timestamp()::time, 2)
returning rid_atevent
)
select rid_atevent
from rows_ins
into m_atevent;
$QUOTED$ || m_sql || $QUOTED$
end if;
elsif TG_OP in ('DELETE', 'TRUNCATE')
then
with rows_ins as (
insert into core.atevent(tablename, rid_parent, changeuser, changedate,changetime,actionx, rid_deletedparent)
values('$QUOTED$ || r_record.tablename || $QUOTED$'
, old.$QUOTED$ || m_pkfield || $QUOTED$
, m_user
, clock_timestamp()::date
, clock_timestamp()::time
, 3
, $QUOTED$ || case when nv(m_dkeyfield) = '' then 'null' else format('old.%s',m_dkeyfield) end || $QUOTED$)
returning rid_atevent
)
select rid_atevent
from rows_ins
into m_atevent;
$QUOTED$
-------------------public--------------
else $QUOTED$
if TG_OP = 'INSERT'
then
with rows_ins as (
insert into public.atevent(tableprefix, rid_parent, changeuser, changedate,changetime,actionx)
values('$QUOTED$ || r_record.tableprefix || $QUOTED$', new.$QUOTED$ || m_pkfield || $QUOTED$, m_user, clock_timestamp()::date, f_tmtoint(clock_timestamp()::time), 1)
returning rid_atevent
)
select rid_atevent
from rows_ins
into m_atevent;
$QUOTED$ || case
when r_record.addversion = 1 then E'
insert into public.atdetail(rid_atevent, datacolumn, changedfrom, changedto) \n
select m_atevent, $Q2$version$Q2$, null,(select version from version limit 1) \n
where not exists (select 1 from public.atdetail ad where ad.rid_atevent = m_atevent and ad.datacolumn = $Q2$version$Q2$);
' else '' end ||
$QUOTED$
elsif TG_OP = 'UPDATE'
then
$QUOTED$ || m_auditcol || $QUOTED$
with rows_ins as (
insert into public.atevent (tableprefix, rid_parent, changeuser, changedate,changetime,actionx)
values('$QUOTED$ || r_record.tableprefix || $QUOTED$', new.$QUOTED$ || m_pkfield || $QUOTED$, m_user, clock_timestamp()::date, f_tmtoint(clock_timestamp()::time), 2)
returning rid_atevent
)
select rid_atevent
from rows_ins
into m_atevent;
$QUOTED$ || m_sql || $QUOTED$
end if;
elsif TG_OP in ('DELETE', 'TRUNCATE')
then
with rows_ins as (
insert into public.atevent (tableprefix, rid_parent, changeuser, changedate,changetime,actionx, rid_deletedparent)
values('$QUOTED$ || r_record.tableprefix || $QUOTED$'
, old.$QUOTED$ || m_pkfield || $QUOTED$
, m_user
, clock_timestamp()::date
, f_tmtoint(clock_timestamp()::time)
, 3
, $QUOTED$ || case when nv(m_dkeyfield) = '' then 'null' else format('old.%s',m_dkeyfield) end || $QUOTED$)
returning rid_atevent
)
select rid_atevent
from rows_ins
into m_atevent;
$QUOTED$ end
|| $QUOTED$
$QUOTED$ || m_sql2 || $QUOTED$
end if;
IF (TG_OP = 'DELETE') THEN
RETURN OLD;
ELSIF (TG_OP = 'UPDATE') THEN
RETURN NEW;
ELSIF (TG_OP = 'INSERT') THEN
RETURN NEW;
END IF;
RETURN NULL; -- result is ignored since this is an AFTER trigger
EXCEPTION
WHEN others THEN
GET STACKED DIAGNOSTICS
m_errmsg = MESSAGE_TEXT
,m_errcontext = PG_EXCEPTION_CONTEXT
,m_errdetail = PG_EXCEPTION_DETAIL
,m_errhint = PG_EXCEPTION_HINT
,m_errstate = RETURNED_SQLSTATE;
m_errmsg := get_err_msg(m_funcname, m_errmsg, m_errcontext, m_errdetail, m_errhint, m_errstate);
RETURN NULL; -- result is ignored since this is an AFTER trigger
END;
$body$
LANGUAGE 'plpgsql'
VOLATILE
CALLED ON NULL INPUT
SECURITY INVOKER
COST 1;
$QUOTED$ as scriptcode
---------------------------------------
) r
;
end if;
if nv(p_mode) in (0,2)
then
return query
select r.scriptpriority::integer
,r.scripttype::citext
,r.objectname::citext
,r.scriptcode::citext
,r_record.schemaname::citext
from (
select distinct on (r_record.tablename)
355 as scriptpriority
,'create audit trigger' as scripttype
,'t_audit_'||r_record.tablename as objectname
,format($A$DO $TT$
BEGIN
IF NOT EXISTS (
SELECT 1
FROM pg_trigger
WHERE tgname = 't_audit_%2$s'
AND tgrelid = '%1$s.%2$s'::regclass
) THEN
-- Create the trigger if it doesn't exist
CREATE TRIGGER t_audit_%2$s
AFTER INSERT OR UPDATE OR DELETE
ON %1$s.%2$s FOR EACH ROW
EXECUTE PROCEDURE %1$s.ft_audit_%2$s();
END IF;
END;
$TT$;
$A$, r_record.schemaname,r_record.tablename) as scriptcode
) r ;
end if;
end loop;
exception
when others then
GET STACKED DIAGNOSTICS
m_errmsg = MESSAGE_TEXT
,m_errcontext = PG_EXCEPTION_CONTEXT
,m_errdetail = PG_EXCEPTION_DETAIL
,m_errhint = PG_EXCEPTION_HINT
,m_errstate = RETURNED_SQLSTATE;
m_errmsg := get_err_msg(m_funcname, m_errmsg, m_errcontext, m_errdetail, m_errhint, m_errstate);
raise '%',m_errmsg;
end;
$$;
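---Example usage (assumed): generate all drop/function/trigger scripts for the audited tables:
---select * from meta.f_audit_build(0);
---or only the DROP TRIGGER scripts:
---select * from meta.f_audit_build(3);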

View File

@@ -0,0 +1,173 @@
------------------------[CHANGE ID: 20171] 3/10/2016 10:11----------------
select public.dropall('f_getauditinfo','meta');
create or replace function meta.f_getauditinfo(
p_tablename text
)
returns table(
tablename citext
, tableprefix citext
, columnname citext
, columndescription citext
, columntype integer
, datatype citext
, specialtype integer
, foreigntable citext
, foreigncolumn citext
, foreignpk citext
, schemaname citext
)
language plpgsql stable
as
$$
--columntype:
-- 0=Normal Audited
--, 1=Primary Key
--, 2=Primary Key Audited
--, 3=UserColumn
--, 4=Normal Encrypted Audited
--, 5 = Parent Table Primary Key for lookup (Must give Foreign Table)
--, 6 = Parent Table Primary Key for delete (Must give Foreign Table)
--, 9=View only, used for custom audit, e.g. pplan
--specialtype:
-- 0,null = Leave alone.
-- 1=Convert int date to sql date.
-- 2=Convert int time to sql time.
-- 3=Status Field. Note: when SpecialType is 3, the ForeignTable column must hold the name of the search sent to F_GetDescription_Code
-- 4=ForeignTable
-- 5=ForeignTable, tableprefix
-- 6 = Blob Column, will compare with size only
-- 7 = Blob Column, will compare with hash only
-- 8 = Hide
declare
--Error Handling--
m_funcname text = 'f_getauditinfo';
m_errmsg text;
m_errcontext text;
m_errdetail text;
m_errhint text;
m_errstate text;
--Error Handling--
m_tablename citext;
m_schemaname citext;
m_results record;
begin
if p_tablename ilike '%.%'
then
m_schemaname = split_part(p_tablename,'.',1);
m_tablename = split_part(p_tablename,'.',2);
else
m_schemaname = '';
m_tablename = p_tablename;
end if;
return query
with tbl as (
select t.table_name
,t.table_schema
from information_schema.tables t
where t.table_schema not ilike 'pg_%'
and t.table_schema not in ('information_schema','meta')
and t.table_type = 'BASE TABLE'
and t.table_name not in (
'atdetail','atevent','eventlog','event_notify','async_jobs','async_broker','async_broker_stat',
'version','upgrades','upgradescript','bt_enums','strmsession','strmsessionchunk',
'upg_options','upgrade_procrun','process_act_list','server_script','generalaudit',
'imapclient_instance','async_schedule','_loginsession','atview','authsession'
)
and t.table_name not ilike 'tmp\_%'
and (
m_tablename::citext = t.table_name::citext
or nv(m_tablename) = ''
)
and (
m_schemaname::citext = t.table_schema::citext
or nv(m_schemaname) = ''
)
) , cols as (
select tbl.*
,exists (select 1
from information_schema.table_constraints ct
inner join information_schema.constraint_column_usage cu on cu.constraint_name = ct.constraint_name
and cu.table_name = ct.table_name
and cu.column_name = c.column_name
where ct.table_schema = c.table_schema
and ct.constraint_type = 'PRIMARY KEY'
and ct.table_name = tbl.table_name
) as isPk
, exists (select 1
from information_schema.table_constraints ct
inner join information_schema.constraint_column_usage cu on cu.constraint_name = ct.constraint_name
and cu.table_name = ct.table_name
and cu.column_name = c.column_name
where ct.table_schema = c.table_schema
and ct.constraint_type = 'FOREIGN KEY'
and ct.table_name = tbl.table_name
) as isFk
,c.column_name::text as column_name
,(case when c.data_type = 'USER-DEFINED' then c.udt_name else c.data_type end)::text as data_type
,f_tableprefix(tbl.table_name::text) as tableprefix
,0::integer as specialtype
from tbl
inner join information_schema.columns c on c.table_name = tbl.table_name
--and c.data_type not in ('USER-DEFINED','ARRAY','geometry','geography') --,'bytea'
--and c.column_name not in ('createdby','createddate','updatedby','updateddate','deletedby','deleteddate')
and c.column_name not ilike '%dropped%'
and c.column_name not in ('prefix','updatecnt')
), processed as (
select *
--,(select from cols b where b.table_name = a.table_name)
from cols a
)
select distinct on (a.table_schema,a.table_name, a.column_name)
a.table_name::citext as tablename
,a.tableprefix::citext as tableprefix
,a.column_name::citext as columnname
,initcap(a.column_name)::citext as columndescription
,(case when a.isPk then 2 else 0 end)::integer as columntype
,a.data_type::citext as datatype
,(case when a.data_type ilike '%bytea%'
and nv(a.specialtype) not in (6,7,8) then 7
else a.specialtype
end)::integer as specialtype
,''::citext as foreigntable
,''::citext as foreigncolumn
,''::citext as foreignpk
,a.table_schema::citext as schemaname
from processed a
where exists (
select 1
from processed a2
where a2.table_name = a.table_name
and a2.table_schema = a.table_schema
and a2.isPk
)
and exists (
select 1
from processed a2
where a2.table_name = a.table_name
and a2.table_schema = a.table_schema
and a2.isPk
and (a2.data_type ilike '%int%' or a2.data_type ilike '%serial%')
)
order by a.table_name, a.column_name
;
exception
when others then
GET STACKED DIAGNOSTICS
m_errmsg = MESSAGE_TEXT
,m_errcontext = PG_EXCEPTION_CONTEXT
,m_errdetail = PG_EXCEPTION_DETAIL
,m_errhint = PG_EXCEPTION_HINT
,m_errstate = RETURNED_SQLSTATE;
m_errmsg := get_err_msg(m_funcname, m_errmsg, m_errcontext, m_errdetail, m_errhint, m_errstate);
raise '%',m_errmsg;
end;
$$;
---select * from meta.f_getauditinfo('syncprocess')
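---Schema-qualified names are split on the first '.', e.g. (illustrative):
---select * from meta.f_getauditinfo('core.masterprocess')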

View File

@@ -0,0 +1,90 @@
--select * from dropall('meta.get_table_fields');
CREATE OR REPLACE FUNCTION meta.get_table_fields(
p_schema_name text
,p_table_name text
)
RETURNS JSONB
LANGUAGE plpgsql STABLE
cost 10
SECURITY DEFINER
AS
$$
DECLARE
a_primary_keys citext[];
a_foreign_keys citext[];
BEGIN
SELECT array_agg(kcu.column_name::citext)
FROM information_schema.table_constraints tc
JOIN information_schema.key_column_usage kcu
ON tc.constraint_name = kcu.constraint_name
AND tc.table_schema = kcu.table_schema
WHERE
tc.constraint_type = 'PRIMARY KEY'
AND tc.table_schema = p_schema_name::citext
AND tc.table_name = p_table_name::citext
INTO a_primary_keys
;
SELECT array_agg(kcu.column_name::citext)
FROM information_schema.table_constraints tc
JOIN information_schema.key_column_usage kcu
ON tc.constraint_name = kcu.constraint_name
AND tc.table_schema = kcu.table_schema
WHERE
tc.constraint_type = 'FOREIGN KEY'
AND tc.table_schema = p_schema_name::citext
AND tc.table_name = p_table_name::citext
INTO a_foreign_keys
;
return (
SELECT jsonb_object_agg(col.column_name::citext
, jsonb_build_object(
'type', case
when col.data_type::citext = 'USER-DEFINED' then col.udt_name::citext
else col.data_type::citext
end
, 'primary_key', case when col.column_name = any (a_primary_keys) then true else false end
, 'foreign_key', case when col.column_name = any (a_foreign_keys) then true else false end
,'relation', (
select to_jsonb(r)
from (
SELECT DISTINCT
c.table_schema AS parent_schema
, c.table_name AS parent_table
, cc.column_name AS parent_column
--, kcu.column_name AS relation_column
FROM information_schema.table_constraints tc
JOIN information_schema.constraint_column_usage ccu
ON tc.constraint_name = ccu.constraint_name
AND tc.table_schema = ccu.table_schema
JOIN information_schema.key_column_usage kcu
ON tc.constraint_name = kcu.constraint_name
AND tc.table_schema = kcu.table_schema
JOIN information_schema.referential_constraints rc
ON tc.constraint_name = rc.constraint_name
JOIN information_schema.constraint_column_usage cc
ON rc.unique_constraint_name = cc.constraint_name
JOIN information_schema.constraint_table_usage c
ON rc.unique_constraint_name = c.constraint_name
WHERE
tc.constraint_type = 'FOREIGN KEY'
AND tc.table_schema = p_schema_name::citext
AND tc.table_name = p_table_name::citext
and kcu.column_name = col.column_name
) r
)
)
)
FROM information_schema.columns col
WHERE
col.table_schema = p_schema_name::citext
AND col.table_name = p_table_name::citext
);
END;
$$;
--select meta.get_table_fields('core','masterprocess')
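--Illustrative output shape (hypothetical columns, not actual data):
--{"id_masterprocess": {"type": "integer", "primary_key": true, "foreign_key": false, "relation": null},
-- "rid_parent": {"type": "integer", "primary_key": false, "foreign_key": true,
--  "relation": {"parent_schema": "core", "parent_table": "masterprocess", "parent_column": "id_masterprocess"}}}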

File diff suppressed because it is too large

View File

@@ -0,0 +1,369 @@
select * from dropall('migration_init','meta');
CREATE OR REPLACE FUNCTION meta.migration_init(
p_version text default ''
,INOUT p_info jsonb default null
,OUT p_retval integer
,OUT p_errmsg text
)
LANGUAGE plpgsql VOLATILE
SECURITY DEFINER
AS
$$
DECLARE
m_except _except_type;
m_dblink_conn TEXT;
m_dblink_login TEXT;
m_pid integer;
m_result text;
m_gotemplate text;
m_qry text;
m_id_model integer;
BEGIN
m_except.func_name = 'migration_init';
p_retval = 0;
p_errmsg = '';
m_dblink_conn := 'dblink_migration_boot';
m_pid = pg_backend_pid();
perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',1,'objecttype',m_except.func_name, 'tm',clock_timestamp()::text)::text);
if p_info is null
then
p_info = jsonb_build_object();
end if;
select f.id_migration_model
from meta.migration_model f
where f.version = p_version
or coalesce(p_version,'') = ''
order by f.changedat desc
limit 1
into m_id_model;
if coalesce(m_id_model,0) = 0
then
raise exception 'Model/File does not exist for version %',p_version;
end if;
-- delete from meta.migration_script s
-- where s.rid_migration_model = m_id_model or s.rid_migration_model is null;
--
-- delete from meta.migration_relation s
-- where s.rid_migration_model = m_id_model or s.rid_migration_model is null;
--
-- delete from meta.migration_index s
-- where s.rid_migration_model = m_id_model or s.rid_migration_model is null;
--
-- delete from meta.migration_table s
-- where s.rid_migration_model = m_id_model or s.rid_migration_model is null;
m_gotemplate = $CQl$do $M$
declare
m_retval integer;
m_errmsg text;
m_info jsonb;
m_file bytea;
m_version text;
m_rid_model integer;
begin
m_version = $CQl$ || quote_literal(p_version) || $CQl$;
m_rid_model = $CQl$ ||m_id_model || $CQl$;
perform log_event($CQl$ || quote_literal(m_except.func_name) || $CQl$ ,'[message]',bt_enum('eventlog','upgrade'),1);
[proc]
end;
$M$;
$CQl$;
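--Each step below substitutes [message] and [proc] in this template and ships the resulting
--DO block over the migration dblink connection, roughly:
--  m_qry = replace(replace(m_gotemplate,'[message]','Step name'),'[proc]','<step body>');
--  m_result = dblink_exec(m_dblink_conn, m_qry, true);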
select 'host=127.0.0.1 dbname='||r.dbname||' port=' || r.port || ' user='|| r.loginuser ||' password=' || r.password
from f_get_local_conn('migration') r
into m_dblink_login;
if not exists (SELECT T.text from unnest(dblink_get_connections()) T where T.text = m_dblink_conn)
then
PERFORM dblink_connect(m_dblink_conn, m_dblink_login);
end if;
perform log_event(m_except.func_name,format('migrations init with connection %s',m_dblink_conn),bt_enum('eventlog','upgrade'));
PERFORM dblink_exec(m_dblink_conn, format($S$set application_name to 'MIGRATION:%s'; $S$, m_pid), true);
m_qry = replace(replace(m_gotemplate,'[message]','Reading migration models'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_read(m_rid_model) r
into m_retval, m_errmsg,m_info;
if m_retval > 0
then
raise exception 'Failed to read model: %', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);
-- raise notice 'Status - Reading migration models: % Qry:%', m_result , m_qry;
---Prime old upgrades
m_qry = replace(replace(m_gotemplate,'[message]','Priming old upgrades'),'[proc]',$CQl$
if exists (
select 1
from information_schema.tables t
where
t.table_name = 'upgrades'
and t.table_schema = 'public'
)
then
insert into public.upgrades(dbversion,status)
select m.version, 0
from meta.migration_model m
where m.id_migration_model = m_rid_model
and not exists (select 1 from public.upgrades s2 where s2.dbversion = m.version)
;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);
--inspect
m_qry = replace(replace(m_gotemplate,'[message]','Inspecting database'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_inspect(m_rid_model,$CQl$ || quote_literal(p_info::text) || $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;
if m_retval > 0
then
raise exception 'Failed to inspect database: %', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);
-- raise notice 'Status - Inspecting database: % Qry:%', m_result, m_qry;
--Drops
m_qry = replace(replace(m_gotemplate,'[message]','Build Differential (Drops)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_build(m_rid_model,'drop',$CQl$ || quote_literal(p_info::text)|| $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;
if m_retval > 0
then
raise exception 'Failed to build differential: % ', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);
--Run Drops
m_qry = replace(replace(m_gotemplate,'[message]','Running Migration Scripts (Drops)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_run(m_rid_model,0,200,$CQl$ || quote_literal(p_info::text)|| $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;
if m_retval > 0
then
raise exception 'Failed to run differentials: %', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);
--Renames
m_qry = replace(replace(m_gotemplate,'[message]','Inspecting database (Renames)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_inspect(m_rid_model,$CQl$ || quote_literal(p_info::text) || $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;
if m_retval > 0
then
raise exception 'Failed to inspect database: %', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);
-- raise notice 'Status - Inspecting database: % Qry:%', m_result, m_qry;
--Build Renames
m_qry = replace(replace(m_gotemplate,'[message]','Build Differential (Renames)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_build(m_rid_model,'rename',$CQl$ || quote_literal(p_info::text)|| $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;
if m_retval > 0
then
raise exception 'Failed to build differential: % ', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);
--Functions
m_qry = replace(replace(m_gotemplate,'[message]','Build Differential (Functions)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_build(m_rid_model,'functions',$CQl$ || quote_literal(p_info::text)|| $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;
if m_retval > 0
then
raise exception 'Failed to build differential: % ', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);
m_qry = replace(replace(m_gotemplate,'[message]','Update legacy'),'[proc]',$CQl$
if exists (
select 1
from information_schema.tables t
where
t.table_name = 'upgrades'
and t.table_schema = 'public'
)
then
update public.upgrades s
set totalscripts = r.total
,failedscripts = r.error
,scriptsdone = r.done
from (
select count(1) filter (where s.status <> 4) as total
, count(1) filter (where s.status = 3) as error
, count(1) filter (where s.status = 2 ) as done
from meta.migration_script s
where s.rid_migration_model = m_rid_model
) r
where s.dbversion in (select m.version
from meta.migration_model m
where m.id_migration_model = m_rid_model
);
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);
--Run Functions
m_qry = replace(replace(m_gotemplate,'[message]','Running Migration Scripts (Functions)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_run(m_rid_model,0,200,$CQl$ || quote_literal(p_info::text)|| $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;
if m_retval > 0
then
raise exception 'Failed to run differentials: %', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);
---Rest
m_qry = replace(replace(m_gotemplate,'[message]','Inspecting database (2nd)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_inspect(m_rid_model,$CQl$ || quote_literal(p_info::text) || $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;
if m_retval > 0
then
raise exception 'Failed to inspect database: %', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);
-- Rest
m_qry = replace(replace(m_gotemplate,'[message]','Build Differential (All)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_build(m_rid_model,'all',$CQl$ || quote_literal(p_info::text)|| $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;
if m_retval > 0
then
raise exception 'Failed to build differential: % ', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);
-- Run Rest
m_qry = replace(replace(m_gotemplate,'[message]','Running Migration Scripts (2nd)'),'[proc]',$CQl$
select r.p_retval, r.p_errmsg, r.p_info
from meta.migration_run(m_rid_model,0,10000,$CQl$ || quote_literal(p_info::text)|| $CQl$::jsonb) r
into m_retval, m_errmsg,m_info;
if m_retval > 0
then
raise exception 'Failed to run differentials: %', m_errmsg;
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);
m_qry = replace(replace(m_gotemplate,'[message]','Update legacy'),'[proc]',$CQl$
if exists (
select 1
from information_schema.tables t
where
t.table_name = 'upgrades'
and t.table_schema = 'public'
)
then
update public.upgrades s
set totalscripts = r.total
,failedscripts = r.error
,scriptsdone = r.done
from (
select count(1) filter (where s.status <> 4) as total
, count(1) filter (where s.status = 3) as error
, count(1) filter (where s.status = 2 ) as done
from meta.migration_script s
where s.rid_migration_model = m_rid_model
) r
where s.dbversion in (select m.version
from meta.migration_model m
where m.id_migration_model = m_rid_model
);
end if;
$CQl$);
m_result = dblink_exec(m_dblink_conn, m_qry, true);
--raise notice 'Status - Running Migration Scripts: % Qry:%', m_result, m_qry;
perform dblink_disconnect(m_dblink_conn);
perform log_event(m_except.func_name,'Migration process completed. Check the scripts status for errors',bt_enum('eventlog','upgrade'));
perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',2,'objecttype',m_except.func_name, 'tm',clock_timestamp()::text)::text);
EXCEPTION
WHEN others THEN
GET STACKED DIAGNOSTICS
m_except.message_text = MESSAGE_TEXT
, m_except.pg_exception_context = PG_EXCEPTION_CONTEXT
, m_except.pg_exception_detail = PG_EXCEPTION_DETAIL
, m_except.pg_exception_hint = PG_EXCEPTION_HINT
, m_except.returned_sqlstate = RETURNED_SQLSTATE
, m_except.schema_name = SCHEMA_NAME
, m_except.table_name = TABLE_NAME
, m_except.pg_datatype_name = PG_DATATYPE_NAME
, m_except.constraint_name = CONSTRAINT_NAME
, m_except.column_name = COLUMN_NAME
;
p_errmsg = _except(m_except);
p_retval = 1;
perform log_event(m_except.func_name,format('Migration error: %s',p_errmsg),bt_enum('eventlog','upgrade'));
if exists (SELECT T.text from unnest(dblink_get_connections()) T where T.text = m_dblink_conn)
then
perform dblink_disconnect(m_dblink_conn);
end if;
perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',3,'error',p_errmsg,'objecttype',m_except.func_name, 'tm',clock_timestamp()::text)::text);
END;
$$;
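--Example usage (assumed): run the full migration against the most recent model:
--select * from meta.migration_init();
--or pin a specific model version (hypothetical version string):
--select * from meta.migration_init('2025.12');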

View File

@@ -0,0 +1,445 @@
--select * from dropall('migration_inspect','meta');
CREATE OR REPLACE FUNCTION meta.migration_inspect(
p_id_migration_model integer default null
,INOUT p_info jsonb default null
,OUT p_retval integer
,OUT p_errmsg text
)
LANGUAGE plpgsql VOLATILE
SECURITY DEFINER
AS
$$
DECLARE
--Error Handling--
m_funcname text = 'meta.migration_inspect';
m_errmsg text;
m_errcontext text;
m_errdetail text;
m_errhint text;
m_errstate text;
m_retval integer;
--Error Handling--
m_rowcnt integer;
BEGIN
p_retval = 0;
p_errmsg = '';
--perform log_event(m_funcname,'init',bt_enum('eventlog','local notice'));
perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',1,'objecttype',m_funcname)::text);
delete from meta.migration_table
where isdb;
insert into meta.migration_table(
rid_migration_model
,tablename
,guid
,schemaname
,isdb
,ismodel
)
select p_id_migration_model
,lower(T.table_name)
,obj_description(format('%s.%s',t.table_schema,t.table_name)::regclass, 'pg_class')
,t.table_schema
,true
,false
from INFORMATION_SCHEMA.tables t
left outer join meta.f_upgrade_table(t.table_schema) tu on lower(tu.tablename) = lower(t.table_name)
where t.table_type = 'BASE TABLE'
and t.table_schema not ilike 'pg_%'
and t.table_schema not ilike 'information_schema%'
and tu.tablename is null
and (
t.table_schema in (
select distinct mt.schemaname
from meta.migration_table mt
where mt.ismodel
)
or t.table_schema = 'public'
)
;
insert into meta.migration_column(rid_migration_table, columnname, columntype, columnlen, precision, prefix, defaultval, indextype, guid, seqseed, ispk,sequence)
with det as (
select
obj_description(format('%s.%s', c.table_schema, c.table_name)::regclass, 'pg_class') as tbl_guid
, lower(c.column_name) as colname
, col_description(format('%s.%s', c.table_schema, c.table_name)::regclass, c.ordinal_position) as col_guid
, case when c.data_type::citext = 'user-defined' then c.udt_name else c.data_type end
|| (case
when c.data_type::citext = 'numeric' then '(' || c.numeric_precision
||
(case when c.numeric_scale = 0 then '' else ',' || c.numeric_scale end)
|| ')'
else ''
end) as coltype
,'(' || c.numeric_precision
||
(case when c.numeric_scale = 0 then '' else ',' || c.numeric_scale end)
|| ')' as numeric_precision_str
, COALESCE(pc.constraint_len,null)::integer as constraint_len
,COALESCE(defval.expr,'') as defaultvalue
,mt.id_migration_table
,c.ordinal_position::int as pos
,exists (
select ct.constraint_name,ct.table_schema,ct.table_name, cu.column_name
from information_schema.table_constraints ct
inner join information_schema.constraint_column_usage cu on cu.constraint_name = ct.constraint_name
and cu.table_name = ct.table_name
and cu.table_schema = ct.table_schema
and cu.table_schema not in ('pg_catalog','')
where ct.constraint_type = 'PRIMARY KEY'
and ct.table_schema = c.table_schema
and ct.table_name = c.table_name
and cu.column_name = c.column_name
) as ispk
from INFORMATION_SCHEMA.columns c
inner join meta.migration_table mt on lower(mt.tablename) = lower(c.table_name)
and lower(mt.schemaname) = lower(c.table_schema)
and mt.isdb
left join lateral (
select pc1.conname
,obj_description(pc1.oid, 'pg_constraint') as constraint_len
from pg_constraint pc1
where pc1.conname = 'chk_' || c.table_name || '_' || c.column_name
limit 1
) pc on true
left join lateral (
SELECT pg_get_expr(pad.adbin, pad.adrelid)::text as expr
FROM pg_attrdef pad
, pg_attribute pat
, pg_class pc
, pg_namespace ns
WHERE
pc.relname = c.table_name
and pc.relnamespace = ns.oid
and ns.nspname::text = c.table_schema
AND pc.oid = pat.attrelid
AND pat.attname = c.column_name
AND pat.attrelid = pad.adrelid
AND pat.attnum = pad.adnum
) defval on true
)
select det.id_migration_table
,det.colname
,det.coltype
,det.constraint_len
,det.numeric_precision_str
,null --prefix
,det.defaultvalue
,'primary key'
,det.col_guid
,null::bigint as seed
, det.ispk
,det.pos
from det
;
----Indexes
with indexes as (
select
i1.id_migration_table
, lower(i1.indexname) as indexname
, i1.schemaname
, i1.tablename
, i1.indisprimary as ispk
, not i1.indisunique as isduplicate
, i1.indisunique or i1.indisprimary as isunique
, i1.ikeyno + 1 as seq
,i1.oid::text as guid
, lower(a.attname) as colname
, row_number() over (partition by i1.indexname order by a.attname) as rnidx
from (
select
obj_description(format('%s.%s', ns.nspname, t.relname)::regclass, 'pg_class') tableident
, t.oid
, c.relname as indexname
, i.indisprimary
, i.indisunique as indisunique
, i.indkey
, unnest(i.indkey) as ikey
, generate_subscripts(i.indkey, 1) as ikeyno
, mt.id_migration_table
, mt.tablename
,mt.schemaname
FROM pg_catalog.pg_class c
inner join pg_catalog.pg_namespace n on n.oid = c.relnamespace
inner join pg_catalog.pg_index i on i.indexrelid = c.oid
inner join pg_catalog.pg_class t on t.oid = i.indrelid
inner join pg_catalog.pg_namespace ns on ns.OID = t.relnamespace
inner join meta.migration_table mt on lower(mt.tablename) = lower(t.relname)
and lower(mt.schemaname) = lower(n.nspname::text)
and mt.isdb
left outer join information_schema.table_constraints cs on cs.constraint_type not in ('CHECK', 'FOREIGN KEY')
and cs.constraint_name = c.relname
and cs.table_name = t.relname
WHERE c.relkind::citext = 'i'
) i1
inner join pg_attribute a on a.attrelid = i1.oid
and a.attnum = ikey
), insdex as (
insert into meta.migration_index( rid_migration_model, rid_migration_table, indexname, ispk, isduplicate, sequence, guid, isdb,ismodel, isunique)
select p_id_migration_model, r.id_migration_table,r.indexname,r.ispk,nv(r.isduplicate,false),r.seq, r.guid,true,false, nv(r.isunique,false)
from indexes r
where r.rnidx = 1
returning id_migration_index,rid_migration_table,indexname,guid
), inscol as (
insert into meta.migration_index_col(rid_migration_index, rid_migration_column_parent, options, sequence)
select i.id_migration_index, mc.id_migration_column, null, r.seq
from indexes r
inner join insdex i on i.rid_migration_table = r.id_migration_table
and i.indexname = r.indexname
inner join meta.migration_column mc on mc.rid_migration_table = r.id_migration_table
and lower(mc.columnname) = lower(r.colname)
returning *
)
select count(1)
into m_rowcnt
;
----Relations
with cnt as (
select T.relname as parent_tablename
,tns.nspname as parent_schemaname
,t1.relname as child_tablename
,t1ns.nspname as child_schemaname
,obj_description(format('%s.%s',tns.nspname,t.relname)::regclass, 'pg_class') as parent_guid
,obj_description(format('%s.%s',t1ns.nspname,t1.relname) ::regclass, 'pg_class') as child_guid
,t.oid parent_oid
,t1.oid child_oid
,c.confupdtype
,c.confdeltype
,c.conname as constraint_name
,par.key as parentcolumn
,generate_subscripts(c.conkey,1) rpkeyno
,child.key as childcolumn
,generate_subscripts(c.confkey,1) rckeyno
from pg_constraint c
inner join pg_catalog.pg_class t on t.oid = c.conrelid
inner join pg_catalog.pg_namespace tns on tns.oid = t.relnamespace
inner join pg_catalog.pg_class t1 on t1.oid = c.confrelid
inner join pg_catalog.pg_namespace t1ns on t1ns.oid = t1.relnamespace
cross join unnest(c.conkey) par(key)
cross join unnest(c.confkey) child(key)
where c.contype = 'f'
), det as (
select
r1.parent_guid
, lower(r1.constraint_name) as constraint_name
, r1.child_guid
, (case r1.confdeltype::citext
when 'r' then 'restrict_server' --restrict server
when 'c' then 'cascade_server' --cascade server
when 'n' then 'set null'
when 'd' then 'set default'
else 'no_action'
end) as deleteconstraint
, (case r1.confupdtype::citext
when 'r' then 'restrict_server' --restrict server
when 'c' then 'cascade_server' --cascade server
when 'n' then 'set null'
when 'd' then 'set default'
else 'no_action'
end) as updateconstraint
, lower(ap.attname) as parent_column
, lower(ac.attname) as child_column
, r1.rpkeyno
, row_number() over (partition by constraint_name order by ap.attname, ac.attname) as rncnt
,r1.parent_tablename
,r1.parent_schemaname
,r1.child_tablename
,r1.child_schemaname
,mtp.id_migration_table as id_migration_table_parent
,mtc.id_migration_table as id_migration_table_child
from cnt r1
inner join pg_attribute ap on ap.attrelid = r1.parent_oid
and ap.attnum = parentcolumn
inner join pg_attribute ac on ac.attrelid = r1.child_oid
and ac.attnum = childcolumn
inner join meta.migration_table mtp on mtp.tablename = r1.parent_tablename
and mtp.schemaname = r1.parent_schemaname
and mtp.isdb
inner join meta.migration_table mtc on mtc.tablename = r1.child_tablename
and mtc.schemaname = r1.child_schemaname
and mtc.isdb
order by
r1.child_tablename
, r1.constraint_name
, r1.rpkeyno
, r1.rckeyno
), ins as (
insert into meta.migration_relation(rid_migration_model ,rid_migration_table_parent, rid_migration_table_child, relationname, updateconstraint
, deleteconstraint, sequence,isdb,ismodel
)
select p_id_migration_model, det.id_migration_table_parent, det.id_migration_table_child, det.constraint_name,det.updateconstraint, det.deleteconstraint,null
,true,false
from det
where det.rncnt = 1
returning *
), inscol as (
insert into meta.migration_relation_col(rid_migration_relation, rid_migration_column_parent, rid_migration_column_child, sequence)
select ins.id_migration_relation, colp.id_migration_column, colc.id_migration_column
,det.rpkeyno
from det
inner join ins on ins.relationname = det.constraint_name
and ins.rid_migration_table_parent = det.id_migration_table_parent
and ins.rid_migration_table_child = det.id_migration_table_child
inner join meta.migration_column colc on colc.rid_migration_table = ins.rid_migration_table_child
and colc.columnname = det.child_column
inner join meta.migration_column colp on colp.rid_migration_table = ins.rid_migration_table_parent
and colp.columnname = det.parent_column
returning *
)
select count(1)
from inscol
into m_rowcnt
;
---Load all triggers
insert into meta.migration_object(rid_migration_model,rid_migration_table, objecttype,objectname, schema, ismodel, isdb)
select distinct on (mt.id_migration_table,tr.trigger_name)
p_id_migration_model
,mt.id_migration_table
,'trigger'
,tr.trigger_name
,mt.schemaname
,false
,true
from INFORMATION_SCHEMA.triggers tr
inner join meta.migration_table mt on mt.isdb
and lower(mt.schemaname) = lower(tr.event_object_schema)
and lower(mt.tablename) = lower(tr.event_object_table)
;
---Load all sequences
insert into meta.migration_object(rid_migration_model,objecttype,rid_migration_table,objectname, schema, ismodel, isdb)
select distinct on (seq.sequence_name, seq.schemaname,mt.id_migration_table)
p_id_migration_model
,'sequence'
,mt.id_migration_table
,seq.sequence_name
,seq.schemaname
,false
,true
from (
SELECT seqclass.relname AS sequence_name,
ns.nspname as schemaname,
depclass.relname AS table_name,
(
select array_agg(attrib.attname)
from pg_attribute attrib
where attrib.attnum = dep.refobjsubid
AND attrib.attrelid = dep.refobjid
) as cols
FROM pg_class AS seqclass
join pg_namespace ns on ns.oid = seqclass.relnamespace
and lower(ns.nspname::text) in (
select distinct mt.schemaname
from meta.migration_table mt
where mt.isdb
)
JOIN pg_sequence AS seq
ON ( seq.seqrelid = seqclass.relfilenode )
left outer JOIN pg_depend AS dep
ON ( seq.seqrelid = dep.objid )
left outer JOIN pg_class AS depclass
ON ( dep.refobjid = depclass.relfilenode )
) seq
left outer join meta.migration_table mt on mt.isdb
and lower(mt.schemaname) = lower(seq.schemaname)
and lower(mt.tablename) = lower(seq.table_name)
;
insert into meta.migration_object(rid_migration_model,objecttype,rid_migration_column,rid_migration_table,objectname, schema, ismodel, isdb,body)
SELECT distinct on (col.id_migration_column, tbl.id_migration_table)
p_id_migration_model
,'check_constraint'
,col.id_migration_column
,tbl.id_migration_table
,conname AS constraint_name,
ns.nspname AS schema,
false,
true,
pg_get_constraintdef(c.oid) AS constraint_definition
FROM pg_constraint c
JOIN pg_attribute a ON a.attrelid = c.conrelid
AND a.attnum = ANY(c.conkey)
inner join pg_class tc on tc.oid = c.conrelid
inner join pg_namespace ns on ns.oid = tc.relnamespace
inner join meta.migration_table tbl on tbl.isdb
and lower(tbl.schemaname) = lower( ns.nspname)
and lower(tbl.tablename) = lower( tc.relname)
inner join meta.migration_column col on col.rid_migration_table = tbl.id_migration_table
and lower(col.columnname) = lower(a.attname)
WHERE contype = 'c';
---Views on a table
insert into meta.migration_object(rid_migration_model,rid_migration_table, objecttype,objectname, schema, ismodel, isdb)
select distinct on (v.oid,mt.id_migration_table)
p_id_migration_model
,mt.id_migration_table
,'view'
,v.oid::regclass AS view
,mt.schemaname
,false
,true
FROM pg_depend AS d -- objects that depend on the table
JOIN pg_rewrite AS r -- rules depending on the table
ON r.oid = d.objid
JOIN pg_class AS v -- views for the rules
ON v.oid = r.ev_class
inner join pg_class as tbl on tbl.oid = d.refobjid
inner join pg_namespace ns on ns.oid = tbl.relnamespace
inner join meta.migration_table mt on mt.isdb
and lower(mt.schemaname) = lower(ns.nspname)
and lower(mt.tablename) = lower(tbl.relname)
WHERE v.relkind = 'v' -- only interested in views
AND d.classid = 'pg_rewrite'::regclass
AND d.refclassid = 'pg_class'::regclass
AND d.deptype = 'n' -- normal dependency
;
--All functions
insert into meta.migration_object(rid_migration_model,objectname, objecttype, schema, body, ismodel, isdb)
SELECT distinct
p_id_migration_model
,pp.proname || format('(%s)', pg_get_function_identity_arguments(oid)), --function name.
'function',
pp.pronamespace::regnamespace::text AS schema, --function located schema
null,
false,
true
-- pg_get_functiondef(oid), --function def
-- pg_get_function_arguments(oid), --(including default values).
-- pg_get_function_identity_arguments(oid), --This form omits default values.
-- pg_get_function_result(oid), --Reconstructs the RETURNS clause of a function
FROM
pg_proc pp
WHERE
pp.pronamespace::regnamespace::text not in ('pg_catalog','information_schema')
and pp.pronamespace::regnamespace::text not ilike 'pg_%'
;
perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',2,'objecttype',m_funcname)::text);
EXCEPTION
WHEN others THEN
GET STACKED DIAGNOSTICS
m_errmsg = MESSAGE_TEXT
,m_errcontext = PG_EXCEPTION_CONTEXT
,m_errdetail = PG_EXCEPTION_DETAIL
,m_errhint = PG_EXCEPTION_HINT
,m_errstate = RETURNED_SQLSTATE;
p_retval = 1;
p_errmsg = format('%s Context %s State: %s', m_errmsg, m_errcontext, m_errstate);
perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',3,'error',m_errmsg,'objecttype',m_funcname)::text);
END;
$$;
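--Example usage (assumed): snapshot the current database objects into the meta.migration_* tables:
--select r.p_retval, r.p_errmsg from meta.migration_inspect(null) r;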

View File

@@ -0,0 +1,773 @@
--select * from dropall('migration_read','meta');
CREATE OR REPLACE FUNCTION meta.migration_read(
p_id_migration_model integer default null
,OUT p_retval integer
,OUT p_errmsg text
,OUT p_info jsonb
)
LANGUAGE plpgsql VOLATILE
SECURITY DEFINER
AS
$$
DECLARE
m_funcname text = 'migration_read';
m_errmsg text;
m_errcontext text;
m_errdetail text;
m_errhint text;
m_errstate text;
m_retval integer;
m_id_migration_model integer;
m_payload text;
m_tm timestamp;
m_xml xml;
m_json json;
j_err jsonb;
BEGIN
m_tm = clock_timestamp();
select f.id_migration_model
,convert_from(case when f_iscompressed(f.modelfile) = 'gzip' then pl_gzip_bytes(0,f.modelfile) else f.modelfile end, 'utf8')
from meta.migration_model f
where f.id_migration_model = p_id_migration_model
or p_id_migration_model is null
order by f.version desc , f.id_migration_model desc
into m_id_migration_model, m_payload;
if m_payload ilike '%<%>%'
then
m_xml = m_payload::xml;
raise notice 'XML File set';
elseif m_payload ilike '%{%}%'
then
raise notice 'JSON File set';
m_json = m_payload::json;
end if;
if m_xml is not null
then
perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',1,'objecttype',m_funcname)::text);
p_info = jsonb_build_object('format','xml');
delete from meta.migration_table
where ismodel;
delete from meta.migration_index
where ismodel;
delete from meta.migration_relation_col d
where d.rid_migration_relation in (
select r.id_migration_relation
from meta.migration_relation r
where r.ismodel
);
delete from meta.migration_relation
where ismodel;
raise notice 'inserting meta.migration_table @ %', (clock_timestamp() - m_tm)::interval;
m_tm = clock_timestamp();
drop table if exists tmp_meta_migration_table;
create temp table tmp_meta_migration_table as
select
lower((xpath('/table/tableident/text()', node.x))[1]::text)::citext as tableident
, lower((xpath('/table/prefix/text()', node.x))[1]::text)::citext as tableprefix
, lower((xpath('/table/tablename/text()', node.x))[1]::text)::citext as tablename
, lower(coalesce((xpath('/table/schema/text()', node.x))[1]::text,(xpath('/table/schemaname/text()', node.x))[1]::text)) as schemaname
, lower((xpath('/table/version/text()', node.x))[1]::text)::citext as version
, coalesce((xpath('/table/@seq', node.x))[1]::text,'0')::integer + 1 as schemapriority
, node.x as xml
from unnest(xpath('/root/tables/table', m_xml)) node(x)
;
insert into meta.migration_table(rid_migration_model ,guid,prefix, tablename, schemaname, version,ismodel, isdb,schemapriority)
select distinct on (r.tableident,r.tablename,r.tableprefix,r.schemaname)
m_id_migration_model
,r.tableident
,r.tableprefix
,r.tablename
,r.schemaname
,r.version
,true
,false
,r.schemapriority
from tmp_meta_migration_table r
;
raise notice 'inserting meta.migration_column @ %', (clock_timestamp() - m_tm)::interval;
m_tm = clock_timestamp();
insert into meta.migration_column(rid_migration_table, columnname, guid, columntype,indextype,seqseed, columnlen, precision, defaultval)
select distinct on (r.id_migration_table,r.columnname,r.columnident)
r.id_migration_table,r.columnname,r.columnident,r.columntype,r.indextype,r.seq_seed,r.columnlen,r.precision
,r.defaultval
from (
select
mt.id_migration_table
, lower((xpath('/column/columnname/text()', col.x))[1]::text) as columnname
, (xpath('/column/columnident/text()', col.x))[1]::text as columnident
, (xpath('/column/columntype/text()', col.x))[1]::text as columntype
, (xpath('/column/indextype/text()', col.x))[1]::text as indextype
, coalesce((xpath('/column/seed/text()', col.x))[1]::text,'0')::bigint as seq_seed
, coalesce((xpath('/column/columnlen/text()', col.x))[1]::text, '0')::integer as columnlen
, coalesce((xpath('/column/precision/text()', col.x))[1]::text, '')::text as precision
, coalesce((xpath('/column/defaultval/text()', col.x))[1]::text, '')::text as defaultval
from tmp_meta_migration_table tbl
cross join unnest(xpath('/table/columns/column', tbl.xml)) col(x)
inner join meta.migration_table mt on mt.ismodel
and mt.rid_migration_model = m_id_migration_model
-- and lower(mt.guid) = lower((xpath('/table/tableident/text()', tbl.x))[1]::text)
and lower(mt.tablename) = lower(tbl.tablename)
and lower(mt.schemaname) =lower(tbl.schemaname)
) r
;
raise notice 'inserting meta.migration_index @ %', (clock_timestamp() - m_tm)::interval;
m_tm = clock_timestamp();
insert into meta.migration_index(rid_migration_model,rid_migration_table, indexname, indextype, ispk,isduplicate, ispartial,partialstr, isunique, sequence, guid,ismodel)
select distinct on (r.id_migration_table,r.indexname)
m_id_migration_model
,r.id_migration_table
,r.indexname
,null
,r.indexprimary in ('1','true')
,r.indexduplicate in ('1','true')
,length(r.indexpartial) > 3
,r.indexpartial
,(not coalesce(r.indexduplicate,'') in ('1','true') and (r.indexunique in ('1','true') or r.indexprimary in ('1','true')))
,row_number() over (order by r.indexname)
,r.indexname
,true
from (
select
mt.id_migration_table
, mt.schemaname
, mt.tablename
, lower((xpath('/index/indexname/text()', idx.x))[1]::text) as indexname
, coalesce(lower((xpath('/index/indexprimary/text()', idx.x))[1]::text),'') as indexprimary
, coalesce(lower((xpath('/index/indexduplicate/text()', idx.x))[1]::text),'') as indexduplicate
, coalesce(lower((xpath('/index/indexpartial/text()', idx.x))[1]::text),'') as indexpartial
, coalesce(lower((xpath('/index/indexunique/text()', idx.x))[1]::text),'') as indexunique
from tmp_meta_migration_table tbl
cross join unnest(xpath('/table/indexes/index', tbl.xml)) idx(x)
inner join meta.migration_table mt on mt.ismodel
and mt.rid_migration_model = m_id_migration_model
and lower(mt.tablename) = lower(tbl.tablename)
and lower(mt.schemaname) =lower(tbl.schemaname)
) r
;
update meta.migration_index u
set isunique = true
where u.indexname ilike 'uk_%'
and not u.ispk
and not u.ispartial
and not u.isduplicate
;
-- update meta.migration_index u
-- set ispartial = true
-- where u.indexname ilike 'k_%'
-- and not u.isunique
-- and not u.ispk
-- ;
insert into meta.migration_index_col(rid_migration_index,rid_migration_column_parent,sequence)
select distinct on (r.id_migration_index,r.id_migration_column)
r.id_migration_index
,r.id_migration_column
,r.seq
from (
select
midx.id_migration_index
,mc.id_migration_column
,coalesce((xpath('/indexcolumn/@seq', idxcol.x))[1]::text,'0')::integer as seq
from tmp_meta_migration_table tbl
cross join unnest(xpath('/table/indexes/index', tbl.xml)) idx(x)
inner join meta.migration_table mt on mt.ismodel
and mt.rid_migration_model = m_id_migration_model
and lower(mt.tablename) = lower(tbl.tablename)
and lower(mt.schemaname) =lower(tbl.schemaname)
inner join meta.migration_index midx on midx.rid_migration_table = mt.id_migration_table
and midx.indexname = lower((xpath('/index/indexname/text()', idx.x))[1]::text)
cross join unnest(xpath('/index/indexcolumns/indexcolumn', idx.x)) idxcol(x)
inner join meta.migration_column mc on mt.id_migration_table = mc.rid_migration_table
and lower(mc.columnname) = lower((xpath('/indexcolumn/text()', idxcol.x))[1]::text)
) r
;
raise notice 'inserting meta.migration_relation temp table @ %', (clock_timestamp() - m_tm)::interval;
m_tm = clock_timestamp();
drop table if exists tmp_meta_migration_relation;
create temp table tmp_meta_migration_relation as
select
tbl.tablename
,tbl.schemaname
, lower((xpath('/relation/childtable/text()', rel.x))[1]::text) as childtable
, lower((xpath('/relation/relationname/text()', rel.x))[1]::text) as relationname
, lower((xpath('/relation/relationguid/text()', rel.x))[1]::text) as relationguid
, lower((xpath('/relation/deleteconstraint/text()', rel.x))[1]::text) as deleteconstraint
, lower((xpath('/relation/updateconstraint/text()', rel.x))[1]::text) as updateconstraint
,rel.x as relation
from tmp_meta_migration_table tbl
cross join unnest(xpath('/table/relations/relation', tbl.xml)) rel(x)
;
raise notice 'inserting meta.migration_relation insert @ %', (clock_timestamp() - m_tm)::interval;
m_tm = clock_timestamp();
insert
into meta.migration_relation( rid_migration_model
, rid_migration_table_parent
, rid_migration_table_child
, relationname
, guid
, updateconstraint
, deleteconstraint
, sequence
, ismodel)
select m_id_migration_model
,mt.id_migration_table
,ct.id_migration_table as rid_child_table
,src.relationname
,src.relationguid
,src.updateconstraint
,src.deleteconstraint
,row_number() over (order by src.relationname)
,true
from tmp_meta_migration_relation as src
inner join meta.migration_table mt on mt.ismodel
and mt.rid_migration_model = m_id_migration_model
and lower(mt.tablename) = lower(src.tablename)
and lower(mt.schemaname) = lower(src.schemaname)
inner join meta.migration_table ct on ct.ismodel
and ct.rid_migration_model = m_id_migration_model
and lower(ct.schemaname) = lower(mt.schemaname)
and lower(ct.tablename) = lower(src.childtable)
;
raise notice 'inserting meta.migration_relation_col @ %', (clock_timestamp() - m_tm)::interval;
m_tm = clock_timestamp();
insert into meta.migration_relation_col(rid_migration_relation, rid_migration_column_parent, rid_migration_column_child, sequence)
select mrel.id_migration_relation
, parcol.id_migration_column
, cldcol.id_migration_column
,coalesce((xpath('/keyfields/@seq', key.x))[1]::text,'0')::integer as seq
from tmp_meta_migration_relation src
inner join meta.migration_table mt on mt.ismodel
and mt.rid_migration_model = m_id_migration_model
and lower(mt.tablename) = lower(src.tablename)
and lower(mt.schemaname) = lower(src.schemaname)
inner join meta.migration_relation mrel on mrel.rid_migration_table_parent = mt.id_migration_table
and lower(mrel.relationname) = lower(src.relationname)
cross join unnest(xpath('/relation/keyfields', src.relation)) key(x)
inner join meta.migration_column parcol on mrel.rid_migration_table_parent = parcol.rid_migration_table
and lower(parcol.columnname) = lower((xpath('/keyfields/parentcolumn/text()', key.x))[1]::text)
inner join meta.migration_column cldcol on mrel.rid_migration_table_child = cldcol.rid_migration_table
and lower(cldcol.columnname) = lower((xpath('/keyfields/childcolumn/text()', key.x))[1]::text)
;
raise notice 'inserting meta.migration_object @ %', (clock_timestamp() - m_tm)::interval;
m_tm = clock_timestamp();
delete from meta.migration_object
where ismodel;
insert into meta.migration_object(rid_migration_model,objecttype,objectname, schema, version, checksum, sequence, priority, guid, body, ismodel, isdb)
select m_id_migration_model
,format('script:%s',r.scripttype)
,format('%s [%s]',r.scriptname,r.id_scriptcode)
,r.dbschema,r.version
, case when coalesce(r.scriptchecksum,'') <> '' then r.scriptchecksum else encode(sha256(convert_to(r.scriptcode,'utf8')),'hex') end
, r.sequence, r.priority, r.id_scriptcode::text,r.scriptcode,true,false
from (
select (xpath('/script/priority/text()', node.x))[1]::text::integer as priority
, (xpath('/script/sequence/text()', node.x))[1]::text::integer as sequence
, lower((xpath('/script/scriptname/text()', node.x))[1]::text)::citext as scriptname
, lower((xpath('/script/scripttype/text()', node.x))[1]::text)::citext as scripttype
, lower((xpath('/script/dbschema/text()', node.x))[1]::text)::citext as dbschema
, lower((xpath('/script/programname/text()', node.x))[1]::text)::citext as programname
, lower((xpath('/script/version/text()', node.x))[1]::text)::citext as version
, lower((xpath('/script/scriptchecksum/text()', node.x))[1]::text)::citext as scriptchecksum
, xml_extract_value('/script/code', node.x)::text as scriptcode
,'dct' as source
,(xpath('/script/id_scriptcode/text()', node.x))[1]::text::integer as id_scriptcode
from unnest(xpath('/root/scripts/script', m_xml )) node(x)
) r
;
elsif m_json is not null
then
p_info = jsonb_build_object('format','json');
raise exception 'Not yet supported';
else
p_info = jsonb_build_object('format','unknown');
raise exception 'Unsupported input file, Content: %', substr(m_payload, 1,20);
end if;
insert into meta.migration_column(rid_migration_table, columnname, columntype, guid)
select distinct on (t.id_migration_table)
t.id_migration_table, 'updatecnt', 'integer',format('updatecnt_%s', t.id_migration_table)
from meta.migration_table t
left outer join meta.migration_column c on c.rid_migration_table = t.id_migration_table
and c.columnname = 'updatecnt'
where t.ismodel
and t.tablename not in (
select uut.tablename
from meta.f_upgrade_table() uut
)
and c.id_migration_column is null
;
raise notice 'updates section @ %', (clock_timestamp() - m_tm)::interval;
m_tm = clock_timestamp();
--Set primary key field from indexes
update meta.migration_column u
set ispk = true
from meta.migration_column c
inner join meta.migration_table t on t.id_migration_table = c.rid_migration_table
and t.ismodel
inner join meta.migration_index idx on idx.rid_migration_table = t.id_migration_table
inner join meta.migration_index_col idxcol on idxcol.rid_migration_index = idx.id_migration_index
and idxcol.rid_migration_column_parent = c.id_migration_column
where idx.ispk
and u.id_migration_column = c.id_migration_column
;
--Set length for strings
update meta.migration_column
set columnlen = reverse(substr(reverse(columntype),2,strpos(reverse(columntype),'(')-2))::integer
- (case when columntype ilike '%cstring%' then 1 else 0 end)
where rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
and columntype ilike '%string%'
;
---Set the length and precision values
update meta.migration_column u
set columnlen = case when coalesce(u.columnlen,0) = 0 and (r.precision > 0 or r.scale > 0)
then coalesce(r.precision,0) + coalesce(r.scale,0)
else u.columnlen
end
,precision = format('%s,%s',r.precision,r.scale)
from (
select id_migration_column
,substring(columntype FROM '\((\d+),(\d+)\)')::integer AS precision
, substring(columntype FROM '\(\d+,(\d+)\)')::integer AS scale
from meta.migration_column
where
rid_migration_table in (
select t.id_migration_table from meta.migration_table t where t.ismodel
)
and columntype ilike '%(%)'
) r
where u.id_migration_column = r.id_migration_column
;
--Set default values for prefixes
update meta.migration_column u
set defaultval = (
select quote_literal(upper(t.prefix))
from meta.migration_table t
where t.id_migration_table = u.rid_migration_table
and t.ismodel
and nv(t.prefix) <> ''
)
where u.rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
and u.columnname = 'prefix'
and coalesce(u.defaultval,'') = ''
;
--Set GUID field types. e.g. text, uuid
with option as (
select name, value
from meta.migration_option
where name = 'guidtype'
)
update meta.migration_column u
set columntype = option.value
,defaultval = case when nv(u.defaultval) = '' then 'newid()' else u.defaultval end
from option
where rid_migration_table in (
select t.id_migration_table
from meta.migration_table t
where t.ismodel
and not exists (
select 1
from meta.migration_option o
cross join regexp_split_to_table(o.value, ',') rex(v)
where o.name = 'textguid'
and rex.v::citext = t.tablename
and t.schemaname = 'public'
)
)
and (u.columnname in ('guid','uuid')
or u.columnname ilike 'guid%'
)
and u.columntype is distinct from option.value
;
--Limit length constraints
with option as (
select name, value::numeric as numvalue
from meta.migration_option
where name = 'max_constraint'
and value ~ '^-?[0-9]+(\.[0-9]+)?$'
)
update meta.migration_column u
set columnlen = 0
from option
where rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
and u.columnlen >= option.numvalue
;
--Force jsonb type for columns whose names are listed in the settype_names_jsonb option
with option as (
select name, value
from meta.migration_option
where name = 'settype_names_jsonb'
and value is not null
)
update meta.migration_column u
set columntype = 'jsonb'
from option
where rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
and lower(u.columnname) in (
SELECT lower(t.v) from regexp_split_to_table(option.value, ',') t(v)
)
;
--Convert the program types to postgres types
update meta.migration_column u
set columntype = meta.f_datatype_map(u.columntype)
where rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
and meta.f_datatype_map(u.columntype) not ilike '%unknown%'
;
update meta.migration_column u
set columntype = case when u.columntype in ('date','text','citext') and (u.columnname ilike '%datetime%' or u.columnname ilike '%timestamp%')
then 'timestamp'
else u.columntype
end
where rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
and meta.f_datatype_map(u.columntype) not ilike '%unknown%'
;
--Large objects have no length
update meta.migration_column u
set columnlen = 0
where rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
and lower(u.columntype) in ('blob,0','jsonb','json','blob','bytea')
;
j_err = null;
select jsonb_agg(
jsonb_build_object('tablename',t.tablename,'schemaname',t.schemaname,'word', kw.word)
)
from meta.migration_table t
inner join pg_get_keywords() kw on lower(kw.word) = lower(t.tablename)
and lower(kw.catdesc::text) = 'reserved'
where t.ismodel
into j_err
;
if jsonb_typeof(j_err) = 'array'
then
p_info = p_info || jsonb_build_object('table_reserved_words',j_err) ;
end if;
j_err = null;
select jsonb_agg(
jsonb_build_object('columnname',u.columnname,'tablename',t.tablename,'schemaname',t.schemaname,'word', kw.word)
)
from meta.migration_column u
inner join meta.migration_table t on t.id_migration_table = u.rid_migration_table
inner join pg_get_keywords() kw on lower(kw.word) = lower(u.columnname)
and lower(kw.catdesc::text) = 'reserved'
where u.rid_migration_table in (select t.id_migration_table from meta.migration_table t where t.ismodel)
into j_err
;
if jsonb_typeof(j_err) = 'array'
then
p_info = p_info || jsonb_build_object('column_reserved_words',j_err) ;
end if;
----Set the default value to the identity if the pk type is identity
update meta.migration_column u
set defaultval = r.def
from (
select
c.id_migration_column
,format($S$nextval('%s.identity_%s_%s'::regclass)$S$,t.schemaname,t.tablename,c.columnname) as def
from meta.migration_table t
inner join meta.migration_column c on c.rid_migration_table = t.id_migration_table
where
t.ismodel
and c.ispk
and nv(c.defaultval) = ''
and c.indextype = 'identity'
) r
where u.id_migration_column = r.id_migration_column;
update meta.migration_table u
set ismodel = false
where format('%s_%s',u.schemaname, u.tablename) in (
select format('%s_%s',split_part(o.name,':',2)
,t.name
)
from meta.migration_option o
cross join regexp_split_to_table(o.value,',') t(name)
where o.name ilike 'exclude:%'
and t.name <> ''
)
and u.ismodel
;
raise notice 'duplicates section @ %', (clock_timestamp() - m_tm)::interval;
m_tm = clock_timestamp();
--Move duplicate tables that exist in the settings to their default schema, and move their keys as well.
with move as (
select
t1.schemaname
, t1.tablename
, t1.id_migration_table as id_migration_table_dest
, t2.schemaname
, t2.tablename
, t2.id_migration_table as id_migration_table_src
,format('%s.%s',t1.schemaname, t1.tablename) as named
from meta.migration_table t1
inner join meta.migration_table t2 on t2.ismodel
and t2.tablename = t1.tablename
and t2.schemaname is distinct from t1.schemaname
where
t1.ismodel
--and not t1.isdb
and (
format('%s.%s',t1.schemaname, t1.tablename) in (
select format('%s.%s',o.value,lower(split_part(o.name, ':', 2)))
from meta.migration_option o
where o.name ilike 'default_schema:%'
)
)
), cols as (
update meta.migration_column u
set rid_migration_table = m.id_migration_table_dest
from move m
inner join meta.migration_column c1 on c1.rid_migration_table = m.id_migration_table_src
and c1.columnname not in (
select c2.columnname
from meta.migration_column c2
where c2.rid_migration_table = m.id_migration_table_dest
)
where u.id_migration_column = c1.id_migration_column
returning *
), relations1 as (
update meta.migration_relation u
set rid_migration_table_parent = m.id_migration_table_dest
,relationname = format('%s_merged',u.relationname)
from move m
where u.rid_migration_table_parent = m.id_migration_table_src
returning *
), relations2 as (
update meta.migration_relation u
set rid_migration_table_child = m.id_migration_table_dest
,relationname = format('%s_merged',u.relationname)
from move m
where u.rid_migration_table_child = m.id_migration_table_src
returning *
), relationscols as (
update meta.migration_relation_col u
set rid_migration_column_child = mc2.id_migration_column
from move m
inner join meta.migration_column mc on mc.rid_migration_table = m.id_migration_table_src
inner join meta.migration_column mc2 on mc2.rid_migration_table = m.id_migration_table_dest
and mc2.columnname = mc.columnname
inner join meta.migration_relation_col rc on rc.rid_migration_column_child = mc.id_migration_column
where u.id_migration_relationcol = rc.id_migration_relationcol
), relationscols2 as (
update meta.migration_relation_col u
set rid_migration_column_parent = mc2.id_migration_column
from move m
inner join meta.migration_column mc on mc.rid_migration_table = m.id_migration_table_src
inner join meta.migration_column mc2 on mc2.rid_migration_table = m.id_migration_table_dest
and mc2.columnname = mc.columnname
inner join meta.migration_relation_col rc on rc.rid_migration_column_parent = mc.id_migration_column
where u.id_migration_relationcol = rc.id_migration_relationcol
), idx as (
update meta.migration_index u
set rid_migration_table = m.id_migration_table_dest
from move m
where u.rid_migration_table = m.id_migration_table_src
returning *
), idxcols as (
update meta.migration_index_col u
set rid_migration_column_parent = col.id_migration_column
from move m
inner join meta.migration_column col on col.rid_migration_table = m.id_migration_table_src
inner join meta.migration_column col2 on col2.rid_migration_table = m.id_migration_table_dest
and col2.columnname = col.columnname
where u.rid_migration_column_parent = col.id_migration_column
)
update meta.migration_table u
set ismodel = false
from move mv
where u.id_migration_table = mv.id_migration_table_src;
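--Make index names unique across schemas by appending the owning schema and table when they are not already part of the name.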
update meta.migration_index u
set indexname = format('%s_%s_%s',u.indexname,tbl.schemaname,tbl.tablename)
from meta.migration_table tbl
where u.rid_migration_table = tbl.id_migration_table
and u.indexname not ilike '%' || tbl.schemaname || '%' || tbl.tablename || '%'
;
--Disable and report duplicate tables, keeping the one from the higher-priority schema.
with dup as (
select
t.tablename as tablename_src
, t2.tablename as tablename_dest
, t.schemaname as schemaname_src
, t2.schemaname as schemaname_dest
, t.id_migration_table as id_migration_table_dest
, t2.id_migration_table as id_migration_table_src
from meta.migration_table t
inner join meta.migration_table t2 on t2.ismodel
and t.tablename = t2.tablename
and t.schemaname is distinct from t2.schemaname
and t.schemapriority < t2.schemapriority
where t.ismodel
and exists (
select *
from meta.migration_option o
where o.name ilike 'default_schema:%'
and (split_part(o.name, ':', 2) = t.tablename
or split_part(o.name, ':', 2) = t2.tablename
)
)
), upd as (
update meta.migration_table u
set ismodel = false
from dup
where u.id_migration_table = dup.id_migration_table_src
returning *
)
select jsonb_agg(to_jsonb(dup))
from dup
into m_json;
insert into meta.table_prefix(schemaname,tablename, prefix)
select distinct t.schemaname,t.tablename,t.prefix
from meta.migration_table t
where t.ismodel
and coalesce(t.prefix,'') <> ''
and not exists (
select 1 from meta.table_prefix p where p.schemaname = t.schemaname and p.tablename = t.tablename
);
update meta.table_prefix u
set prefix = t.prefix
from meta.migration_table t
where t.ismodel
and u.tablename = t.tablename
and u.schemaname = t.schemaname
and u.prefix is distinct from t.prefix
;
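--Disable duplicate relations that link the same parent and child tables on the same columns, keeping only the lowest relation id.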
update meta.migration_relation u
set ismodel = false
from (
select max(mr.rid_migration_table_child) as rid_migration_table_child
, max(mr.rid_migration_table_parent) as rid_migration_table_parent
, count(1) as cnt
, array_agg(mr.id_migration_relation) as a_id_migration_relation
, min(mr.id_migration_relation) as id_migration_relation_keep
, array_agg(mr.relationname)
,string_agg(c1.columnname,'_')
from meta.migration_relation mr
inner join meta.migration_relation_col rc on rc.rid_migration_relation = mr.id_migration_relation
inner join meta.migration_table t1 on t1.id_migration_table = mr.rid_migration_table_parent
inner join meta.migration_table t2 on t2.id_migration_table = mr.rid_migration_table_child
inner join meta.migration_column c1 on c1.id_migration_column = rc.rid_migration_column_parent
inner join meta.migration_column c2 on c2.id_migration_column = rc.rid_migration_column_child
where mr.ismodel
group by t1.schemaname, t1.tablename,t2.schemaname,t2.tablename,c1.columnname,c2.columnname
having count(1) > 1
) r
where u.id_migration_relation = any(a_id_migration_relation)
and u.id_migration_relation <> r.id_migration_relation_keep
;
/*
update meta.migration_relation u
set ismodel = false
,isdb = false
from (
select mr.relationname
, mr.rid_migration_table_child
, mr.rid_migration_table_parent
, count(1) as cnt
, array_agg(mr.id_migration_relation) as a_id_migration_relation
, min(mr.id_migration_relation) as id_migration_relation
from meta.migration_relation mr
where mr.ismodel
group by mr.relationname, mr.rid_migration_table_child, mr.rid_migration_table_parent
) r
where u.id_migration_relation = any(r.a_id_migration_relation)
and u.id_migration_relation is distinct from r.id_migration_relation
;
*/
raise notice 'done @ %', (clock_timestamp() - m_tm)::interval;
m_tm = clock_timestamp();
p_info = coalesce(jsonb_concat(p_info, m_json::jsonb),p_info);
perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',2,'objecttype',m_funcname)::text);
EXCEPTION
WHEN others THEN
GET STACKED DIAGNOSTICS
m_errmsg = MESSAGE_TEXT
,m_errcontext = PG_EXCEPTION_CONTEXT
,m_errdetail = PG_EXCEPTION_DETAIL
,m_errhint = PG_EXCEPTION_HINT
,m_errstate = RETURNED_SQLSTATE;
p_retval = 1;
p_errmsg = format('%s Context %s State: %s', m_errmsg, m_errcontext, m_errstate);
raise warning '% % hint:% state:% context:%',m_errmsg,m_errdetail,m_errhint,m_errstate,m_errcontext;
perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',3,'error',m_errmsg,'objecttype',m_funcname)::text);
END;
$$;
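A minimal usage sketch (illustrative only, not part of the file above): the reader is typically called with an explicit model id, or with null to load the latest model; the OUT parameters report success and the parsed-model details.

select r.p_retval, r.p_errmsg, jsonb_pretty(r.p_info) as info
from meta.migration_read(p_id_migration_model := null) r;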

View File

@@ -0,0 +1,199 @@
select * from dropall('migration_run','meta');
CREATE OR REPLACE FUNCTION meta.migration_run(
p_id_migration_model integer default null
,p_priority_start integer default 0
,p_priority_end integer default 10000
,INOUT p_info jsonb default null
,OUT p_retval integer
,OUT p_errmsg text
)
LANGUAGE plpgsql VOLATILE
SECURITY DEFINER
AS
$$
DECLARE
--Error Handling--
m_funcname text = 'meta.migration_run';
m_errmsg text;
m_errcontext text;
m_errdetail text;
m_errhint text;
m_errstate text;
m_retval integer;
--Error Handling--
m_dblink_conn TEXT;
m_dblink_login TEXT;
m_gotemplate text;
m_pid integer;
r_lp record;
m_errcnt integer;
G_BREAK_ERR_CNT constant integer = 500;
BEGIN
m_dblink_conn = 'dblink_migration';
p_retval = 0;
p_errmsg = '';
m_pid = pg_backend_pid();
p_info = jsonb_build_array();
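--The template below wraps each script in a DO block with a short lock_timeout and retries
--up to m_max_attempts times when lock_not_available is raised, so a temporarily locked
--relation does not stall the whole migration run.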
m_gotemplate = $CQl$do $OUTERDO$
declare
m_lock_timeout text = '1500ms';
m_max_attempts int = 2;
m_ddl_completed boolean = false;
begin
set local lock_timeout = '1500ms';
perform set_config('lock_timeout', m_lock_timeout, false);
for i in 1..m_max_attempts loop
begin
execute $EXECMIGRATION$[proc]$EXECMIGRATION$;
m_ddl_completed = true;
exit;
exception when lock_not_available then
raise warning 'attempt %/% to acquire lock for [objectname] failed', i, m_max_attempts;
perform pg_sleep(0.1);
end;
-- if i > 1
-- then
-- SET statement_timeout = 20000;
-- end if;
end loop;
end
$OUTERDO$;
$CQl$;
select 'host=127.0.0.1 dbname='||r.dbname||' port=' || r.port || ' user='|| r.loginuser ||' password=' || r.password
from f_get_local_conn('migration') r
into m_dblink_login;
if not exists (SELECT T.text from unnest(dblink_get_connections()) T where T.text = m_dblink_conn)
then
PERFORM dblink_connect(m_dblink_conn, m_dblink_login);
end if;
perform log_event(m_funcname,format('migrations_run init [%s to %s] with connection %s',p_priority_start,p_priority_end,m_dblink_conn),bt_enum('eventlog','upgrade'));
PERFORM dblink_exec(m_dblink_conn, format($S$set application_name to 'MIGRATION:%s'; $S$, m_pid), false);
for r_lp in (
select *
,''::text as result
,clock_timestamp() as startat
from meta.migration_script s
where coalesce(s.status,0) = 0
and s.priority >= p_priority_start
and s.priority <= p_priority_end
and octet_length(s.body) > 1
order by s.priority, s.sequence, s.id_migration_script
)
loop
if m_errcnt > G_BREAK_ERR_CNT
then
perform log_event(m_funcname,format('migrations_run Max Error Count [%s to %s] Errors %s/%s',p_priority_start,p_priority_end,m_errcnt,G_BREAK_ERR_CNT),bt_enum('eventlog','upgrade'));
exit;
end if;
perform dblink_exec(m_dblink_conn, format($Q$ DO $OUTERDO$ BEGIN
if not meta.f_can_migrate()
then
raise exception 'Cannot migrate. Check the f_can_migrate function.';
end if;
update meta.migration_script u
set status = 1
,changedat = now()
where u.id_migration_script = %1$s;
END; $OUTERDO$; $Q$, r_lp.id_migration_script), true)
;
begin
--raise notice 'Lp %: %',r_lp.objectname, r_lp.result;
--raise notice 'Executing % % [%,%]', r_lp.objectname , r_lp.objecttype, r_lp.priority, r_lp.sequence;
select dblink_exec(m_dblink_conn, replace(replace(replace(m_gotemplate,'[proc]', r_lp.body),'[objectname]',r_lp.objectname),'[priority]',r_lp.priority::text), true)
into r_lp.result;
r_lp.duration = clock_timestamp() - r_lp.startat;
perform log_event(m_funcname,format('Ran Script %s @%s ',r_lp.objectname,r_lp.priority),bt_enum('eventlog','upgrade'));
perform dblink_exec(m_dblink_conn, format($Q$
update meta.migration_script u
set status = 2
,changedat = now()
,duration = %2$s::interval
where u.id_migration_script = %1$s;
$Q$, r_lp.id_migration_script, quote_literal( r_lp.duration)), true)
;
p_info = jsonb_concat(p_info, jsonb_build_object('objectname',r_lp.objectname,'objecttype', r_lp.objecttype,'status','success'));
EXCEPTION
WHEN others THEN
GET STACKED DIAGNOSTICS
m_errmsg = MESSAGE_TEXT;
raise notice 'Exception % -> %',r_lp.objectname, m_errmsg;
r_lp.duration = clock_timestamp() - r_lp.startat;
perform log_event(m_funcname,format('migrations_run error %s -> %s >> %s',r_lp.objecttype,r_lp.objectname,m_errmsg),bt_enum('eventlog','local error'));
perform dblink_exec(m_dblink_conn, format($Q$
update meta.migration_script u
set status = 3
,error = %2$s || E' \nResult: ' || %3$s
,changedat = now()
,duration = %4$s::interval
where u.id_migration_script = %1$s;
$Q$, r_lp.id_migration_script, quote_literal(m_errmsg), quote_literal(coalesce(r_lp.result,'')), quote_literal( r_lp.duration) ), false);
p_info = jsonb_concat(p_info, jsonb_build_object('objectname',r_lp.objectname,'objecttype', r_lp.objecttype,'status','error', 'error',m_errmsg));
m_errcnt = coalesce(m_errcnt,0) + 1;
end;
perform dblink_exec(m_dblink_conn, format($Q$DO $DDD$
declare
m_text text;
begin
select pg_notify('upgrade.events', json_build_object('type','upgrade','status',m.status,'objecttype', m.objecttype,'objectname',m.objectname)::text)
from meta.migration_script m
where m.id_migration_script = %1$s
into m_text;
end;
$DDD$;$Q$, r_lp.id_migration_script, quote_literal(m_errmsg), quote_literal(coalesce(r_lp.result,'')) ), false);
end loop;
perform dblink_disconnect(m_dblink_conn);
EXCEPTION
WHEN others THEN
GET STACKED DIAGNOSTICS
m_errmsg = MESSAGE_TEXT
,m_errcontext = PG_EXCEPTION_CONTEXT
,m_errdetail = PG_EXCEPTION_DETAIL
,m_errhint = PG_EXCEPTION_HINT
,m_errstate = RETURNED_SQLSTATE;
p_retval = 1;
p_errmsg = format('%s Context %s State: %s', m_errmsg, m_errcontext, m_errstate);
if exists (SELECT T.text from unnest(dblink_get_connections()) T where T.text = m_dblink_conn)
then
perform dblink_disconnect(m_dblink_conn);
end if;
perform pg_notify('upgrade.events', json_build_object('type','upgrade','status',3,'error',m_errmsg,'objecttype',m_funcname)::text);
END;
$$;
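A minimal usage sketch (illustrative only, not part of the file above): run the staged scripts for a priority window and inspect the per-script results collected in p_info.

select r.p_retval, r.p_errmsg, jsonb_pretty(r.p_info) as results
from meta.migration_run(p_priority_start := 0, p_priority_end := 100) r;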

View File

@@ -0,0 +1,53 @@
with ops (name,value) as (
values ('guidtype','uuid')
,('max_constraint','30')
,('droprename','_dropped_')
,('settype_names_jsonb','jsonvalue,jsonbvalue,jsonschema')
,('default_schema:hub','core')
,('default_schema:hub_link','core')
--,('default_schema:colour','core')
--- ,('default_schema:mastertype','core') --Master type must not be set for pgifas
,('default_schema:workflowitem','core')
,('exclude:public','suppinvoice,suppinvoiceitem,project,generalledger,supporder,subaccount,customerinvoice,custcreditnoteitem,custinvoiceitem,cashbookdetail,supporderitem,bankstatement,journaldetail,custcreditnote,custcreditnoteitem,autoreconstring,Suppcreditnote,suppcreditnoteitem')
-- ,('default_schema:fiscalyear','core')
-- ,('default_schema:autoreconstring','core')
-- ,('default_schema:postalcode','core')
-- ,('default_schema:cashbookdetail','core')
-- ,('default_schema:supporderitem','core')
-- ,('default_schema:accounttype','core')
-- ,('default_schema:bankstatement','core')
-- ,('default_schema:journaldetail','core')
-- ,('default_schema:cashbook','core')
-- ,('default_schema:custcreditnote','core')
-- ,('default_schema:custcreditnoteitem','core')
-- ,('default_schema:custinvoiceitem','core')
-- ,('default_schema:customerinvoice','core')
-- ,('default_schema:generalledger','core')
-- ,('default_schema:journal','core')
-- ,('default_schema:suppcreditnote','core')
-- ,('default_schema:suppcreditnoteitem','core')
-- ,('default_schema:suppinvoice','core')
-- ,('default_schema:suppinvoiceitem','core')
-- ,('default_schema:supporder','core')
-- ,('default_schema:subaccount','core')
-- ,('default_schema:project','core')
-- ,('default_schema:period','core')
), upd as (
update meta.migration_option o
set value = ops.value
from ops
where o.name = ops.name
returning *
)
insert into meta.migration_option(name,value)
select ops.name,ops.value from ops
where not exists (
select *
from meta.migration_option o
where o.name = ops.name
);
delete from meta.migration_option o
where o.name in ('default_schema:mastertype','default_schema:colour')
;
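A small illustrative query (an assumption about how these rows are usually inspected, not part of the script above): the prefixed option names split into an option kind and a target object, which is how migration_read consumes them.

select o.name
,split_part(o.name, ':', 1) as option_kind
,split_part(o.name, ':', 2) as target
,o.value
from meta.migration_option o
where o.name ilike 'default_schema:%'
or o.name ilike 'exclude:%'
order by o.name;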

View File

@@ -0,0 +1,96 @@
package main
import (
"fmt"
"log"
"git.warky.dev/wdevs/relspecgo/pkg/readers"
"git.warky.dev/wdevs/relspecgo/pkg/readers/pgsql"
)
func main() {
// Example PostgreSQL connection string
// Format: postgres://username:password@localhost:5432/database_name
connectionString := "postgres://user:password@localhost:5432/mydb"
// Create reader options
options := &readers.ReaderOptions{
ConnectionString: connectionString,
}
// Create PostgreSQL reader
reader := pgsql.NewReader(options)
// Read the entire database
db, err := reader.ReadDatabase()
if err != nil {
log.Fatalf("Failed to read database: %v", err)
}
// Display database information
fmt.Printf("Database: %s\n", db.Name)
fmt.Printf("Type: %s\n", db.DatabaseType)
fmt.Printf("Version: %s\n", db.DatabaseVersion)
fmt.Printf("Schemas: %d\n\n", len(db.Schemas))
// Iterate through schemas
for _, schema := range db.Schemas {
fmt.Printf("Schema: %s\n", schema.Name)
fmt.Printf(" Tables: %d\n", len(schema.Tables))
fmt.Printf(" Views: %d\n", len(schema.Views))
fmt.Printf(" Sequences: %d\n", len(schema.Sequences))
// Display table details
for _, table := range schema.Tables {
fmt.Printf(" Table: %s.%s\n", schema.Name, table.Name)
fmt.Printf(" Columns: %d\n", len(table.Columns))
fmt.Printf(" Constraints: %d\n", len(table.Constraints))
fmt.Printf(" Indexes: %d\n", len(table.Indexes))
fmt.Printf(" Relationships: %d\n", len(table.Relationships))
// Display columns
for _, col := range table.Columns {
fmt.Printf(" - %s: %s", col.Name, col.Type)
if col.IsPrimaryKey {
fmt.Printf(" [PK]")
}
if col.NotNull {
fmt.Printf(" NOT NULL")
}
fmt.Println()
}
}
// Display view details
for _, view := range schema.Views {
fmt.Printf(" View: %s.%s\n", schema.Name, view.Name)
fmt.Printf(" Columns: %d\n", len(view.Columns))
fmt.Printf(" Definition: %s\n", view.Definition)
}
// Display sequence details
for _, seq := range schema.Sequences {
fmt.Printf(" Sequence: %s.%s\n", schema.Name, seq.Name)
fmt.Printf(" Start: %d, Increment: %d\n", seq.StartValue, seq.IncrementBy)
if seq.OwnedByTable != "" {
fmt.Printf(" Owned by: %s.%s\n", seq.OwnedByTable, seq.OwnedByColumn)
}
}
fmt.Println()
}
// Example: Read just the first schema
schema, err := reader.ReadSchema()
if err != nil {
log.Fatalf("Failed to read schema: %v", err)
}
fmt.Printf("First schema: %s\n", schema.Name)
// Example: Read just the first table
table, err := reader.ReadTable()
if err != nil {
log.Fatalf("Failed to read table: %v", err)
}
fmt.Printf("First table: %s.%s\n", table.Schema, table.Name)
}