PostgreSQL: Difference between revisions

From Chorke Wiki
Jump to navigation Jump to search
No edit summary
Line 83: Line 83:


|valign='top'|
|valign='top'|
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
GRANT ALL ON tmp_coll TO academia;
GRANT ALL ON tmp_coll TO academia;
GRANT ALL ON test_json TO academia;
GRANT ALL ON test_json TO academia;
Line 89: Line 89:


|valign='top'|
|valign='top'|
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- psql -V
-- psql -V
SELECT version();
SELECT version();
Line 132: Line 132:
==Extensions==
==Extensions==
{|
{|
| valign="top" |
|valign='top'|
<syntaxhighlight lang="bash">
<syntaxhighlight lang="bash">
ENABLE_DB_LINK=$(cat <<-DDL
ENABLE_DB_LINK=$(cat <<-DDL
Line 144: Line 144:
</syntaxhighlight>
</syntaxhighlight>


| valign="top" |
|valign='top'|
<syntaxhighlight lang="bash">
<syntaxhighlight lang="bash">
ENABLE_PG_CRYPTO=$(cat <<-DDL
ENABLE_PG_CRYPTO=$(cat <<-DDL
Line 160: Line 160:
----
----
|-
|-
| valign="top" |
|valign='top'|
<syntaxhighlight lang="bash">
<syntaxhighlight lang="bash">
sudo apt install postgresql-16-pgvector
sudo apt install postgresql-16-pgvector
Line 188: Line 188:
{|
{|
|valign="top"|
|valign="top"|
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- show databases
-- show databases
SELECT datname FROM pg_database;            -- \l+
SELECT datname FROM pg_database;            -- \l+
Line 210: Line 210:


|valign="top"|
|valign="top"|
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT pg_database_size('my_database');                    -- \l+
SELECT pg_database_size('my_database');                    -- \l+
SELECT pg_size_pretty(pg_database_size('my_database'));    -- \l+
SELECT pg_size_pretty(pg_database_size('my_database'));    -- \l+
Line 235: Line 235:
==XML Query==
==XML Query==
{|
{|
| valign="top" |
|valign='top'|
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT
SELECT
     xml_is_well_formed('<>')        AS "<>",                  -- false
     xml_is_well_formed('<>')        AS "<>",                  -- false
Line 246: Line 246:
</syntaxhighlight>
</syntaxhighlight>


| valign="top" |
|valign='top'|
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT
SELECT
     xml_is_well_formed_content('<>')        AS "<>",          -- false
     xml_is_well_formed_content('<>')        AS "<>",          -- false
Line 261: Line 261:
----
----
|-
|-
| valign="top" |
|valign='top'|
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT
SELECT
     xml_is_well_formed_document('<>')        AS "<>",        -- false
     xml_is_well_formed_document('<>')        AS "<>",        -- false
Line 272: Line 272:
</syntaxhighlight>
</syntaxhighlight>


| valign="top" |
|valign='top'|


|-
|-
Line 278: Line 278:
----
----
|-
|-
| valign="top" |
|valign='top'|
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
WITH xpath_node AS (
WITH xpath_node AS (
     SELECT
     SELECT
Line 298: Line 298:
</syntaxhighlight>
</syntaxhighlight>


| valign="top" |
|valign='top'|
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
WITH workflow_namespace AS (
WITH workflow_namespace AS (
     SELECT
     SELECT
Line 323: Line 323:
|-
|-
| valign="top" colspan="2"|
| valign="top" colspan="2"|
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
WITH xpath_ns AS (
WITH xpath_ns AS (
     SELECT
     SELECT
Line 370: Line 370:


==Array Query==
==Array Query==
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT dependencies||'"Running Fiscal Year"'
SELECT dependencies||'"Running Fiscal Year"'
FROM scope WHERE name = 'Fiscal Year Config';
FROM scope WHERE name = 'Fiscal Year Config';
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT * FROM user
SELECT * FROM user
WHERE owner_ref LIKE ANY (ARRAY[
WHERE owner_ref LIKE ANY (ARRAY[
Line 382: Line 382:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT * FROM user
SELECT * FROM user
WHERE owner_ref LIKE ALL (ARRAY[
WHERE owner_ref LIKE ALL (ARRAY[
Line 400: Line 400:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT extended_properties ->'commission'->'value'
SELECT extended_properties ->'commission'->'value'
FROM seller WHERE extended_properties ->>'type' = 'Reseller';
FROM seller WHERE extended_properties ->>'type' = 'Reseller';
Line 418: Line 418:
----
----
{|
{|
| valign="top" |
|valign='top'|
'''JSON'''
'''JSON'''
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT id, name FROM json_to_record (
SELECT id, name FROM json_to_record (
     '{"id":1,"name":"Academia"}'::json
     '{"id":1,"name":"Academia"}'::json
Line 426: Line 426:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT * FROM json_array_elements_text('[
SELECT * FROM json_array_elements_text('[
     {"id":1,"name":"Academia"},
     {"id":1,"name":"Academia"},
Line 434: Line 434:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT id, name FROM json_array_elements_text('[
SELECT id, name FROM json_array_elements_text('[
     {"id": 1, "name": "Academia"},
     {"id": 1, "name": "Academia"},
Line 444: Line 444:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT id, name FROM json_array_elements('[
SELECT id, name FROM json_array_elements('[
     {"id": 1, "name": "Academia"},
     {"id": 1, "name": "Academia"},
Line 454: Line 454:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT id, name FROM json_to_recordset('{"data": [
SELECT id, name FROM json_to_recordset('{"data": [
     {"concerns": {"id": 1, "name": "Academia"}},
     {"concerns": {"id": 1, "name": "Academia"}},
Line 463: Line 463:
</syntaxhighlight>
</syntaxhighlight>


| valign="top" |
|valign='top'|
'''JSONB'''
'''JSONB'''
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT id, name FROM jsonb_to_record (
SELECT id, name FROM jsonb_to_record (
     '{"id":1,"name":"Academia"}'::jsonb
     '{"id":1,"name":"Academia"}'::jsonb
Line 471: Line 471:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT * FROM jsonb_array_elements_text('[
SELECT * FROM jsonb_array_elements_text('[
     {"id":1,"name":"Academia"},
     {"id":1,"name":"Academia"},
Line 479: Line 479:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT id, name FROM jsonb_array_elements_text('[
SELECT id, name FROM jsonb_array_elements_text('[
     {"id": 1, "name": "Academia"},
     {"id": 1, "name": "Academia"},
Line 489: Line 489:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT id, name FROM jsonb_array_elements('[
SELECT id, name FROM jsonb_array_elements('[
     {"id": 1, "name": "Academia"},
     {"id": 1, "name": "Academia"},
Line 499: Line 499:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT id, name FROM jsonb_to_recordset('{"data": [
SELECT id, name FROM jsonb_to_recordset('{"data": [
     {"concerns": {"id": 1, "name": "Academia"}},
     {"concerns": {"id": 1, "name": "Academia"}},
Line 508: Line 508:
</syntaxhighlight>
</syntaxhighlight>


| valign="top" |
|valign='top'|
'''Result'''
'''Result'''
   id |  name   
   id |  name   
Line 545: Line 545:
----
----
===Insert===
===Insert===
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- query » group : religion
-- query » group : religion
-- -----------------------------------------------------------------------------
-- -----------------------------------------------------------------------------
Line 595: Line 595:


==Delete Join==
==Delete Join==
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- USING is not an ANSI standard and is not supported by other RDBMS
-- USING is not an ANSI standard and is not supported by other RDBMS
-- it's better to use a subquery rather than USING for the join
-- it's better to use a subquery rather than USING for the join
Line 761: Line 761:


==Date & Time==
==Date & Time==
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT * FROM policy  
SELECT * FROM policy  
WHERE DATE_TRUNC('day', created_on) = TO_DATE('2021-12-31', 'yyyy-mm-dd');
WHERE DATE_TRUNC('day', created_on) = TO_DATE('2021-12-31', 'yyyy-mm-dd');
Line 769: Line 769:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT EXTRACT(CENTURY FROM TIMESTAMP '2013-10-10 10:10:10');
SELECT EXTRACT(CENTURY FROM TIMESTAMP '2013-10-10 10:10:10');
SELECT DATE_TRUNC('year', TIMESTAMP '2013-10-10 10:10:10');
SELECT DATE_TRUNC('year', TIMESTAMP '2013-10-10 10:10:10');
Line 791: Line 791:


==String Format==
==String Format==
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT STRING_TO_ARRAY('/my/request/path', '/') "string_array";
SELECT STRING_TO_ARRAY('/my/request/path', '/') "string_array";
SELECT ARRAY_TO_JSON(STRING_TO_ARRAY('/my/request/path', '/')) "json_array";
SELECT ARRAY_TO_JSON(STRING_TO_ARRAY('/my/request/path', '/')) "json_array";
Line 797: Line 797:


==Number Format==
==Number Format==
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT NULLIF(regexp_replace('CKI00109'    , '\D','','g'), '')::numeric;
SELECT NULLIF(regexp_replace('CKI00109'    , '\D','','g'), '')::numeric;
SELECT NULLIF(regexp_replace('CKI0010#9####', '\D','','g'), '')::numeric;
SELECT NULLIF(regexp_replace('CKI0010#9####', '\D','','g'), '')::numeric;
Line 808: Line 808:
  pg_dump academia| gzip > academia_2020021.sql.gz
  pg_dump academia| gzip > academia_2020021.sql.gz


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT pg_terminate_backend(pid)
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity WHERE datname IN
FROM pg_stat_activity WHERE datname IN
Line 828: Line 828:


==OS User Auth==
==OS User Auth==
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
CREATE DATABASE academia;
CREATE DATABASE academia;
CREATE USER academia WITH LOGIN;
CREATE USER academia WITH LOGIN;
Line 842: Line 842:


==DBLink POC==
==DBLink POC==
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
CREATE EXTENSION dblink;
CREATE EXTENSION dblink;
CREATE EXTENSION dblink SCHEMA extensions;
CREATE EXTENSION dblink SCHEMA extensions;
Line 850: Line 850:
</syntaxhighlight>
</syntaxhighlight>
----
----
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- using pgsql block
-- using pgsql block
DO
DO
Line 873: Line 873:
</syntaxhighlight>
</syntaxhighlight>
----
----
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- using pgsql query
-- using pgsql query
WITH user_info AS (
WITH user_info AS (
Line 898: Line 898:


==DBLink Exec==
==DBLink Exec==
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
DO
DO
$BODY$
$BODY$
Line 909: Line 909:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
DO
DO
$BODY$
$BODY$
Line 940: Line 940:


==Recursive Query==
==Recursive Query==
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
WITH RECURSIVE hierarchy AS (
WITH RECURSIVE hierarchy AS (
     SELECT
     SELECT
Line 964: Line 964:


==Encode/Decode==
==Encode/Decode==
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT decode('Chorke Academia, Inc.', 'escape');            -- text to binary
SELECT decode('Chorke Academia, Inc.', 'escape');            -- text to binary
SELECT encode('Chorke Academia, Inc.'::bytea, 'escape');    -- binary to text
SELECT encode('Chorke Academia, Inc.'::bytea, 'escape');    -- binary to text
Line 976: Line 976:
----
----
'''Base36 Encode'''
'''Base36 Encode'''
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
CREATE OR REPLACE FUNCTION fn_base36_encode(IN base10 bigint)
CREATE OR REPLACE FUNCTION fn_base36_encode(IN base10 bigint)
     RETURNS  varchar
     RETURNS  varchar
Line 999: Line 999:
----
----
'''Base36 Decode'''
'''Base36 Decode'''
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
CREATE OR REPLACE FUNCTION fn_base36_decode(IN base36 varchar)
CREATE OR REPLACE FUNCTION fn_base36_decode(IN base36 varchar)
     RETURNS  bigint
     RETURNS  bigint
Line 1,031: Line 1,031:
  \dfS lo_*
  \dfS lo_*


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- lo_open(lobjId oid, mode integer) returns integer
-- lo_open(lobjId oid, mode integer) returns integer
-- The mode parameter to lo_open uses two constants:
-- The mode parameter to lo_open uses two constants:
Line 1,044: Line 1,044:
</syntaxhighlight>
</syntaxhighlight>
----
----
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- example: a
-- example: a
SELECT lo_get(photo::oid), lo_get(resume::oid) FROM employee;                        -- select large object as binary data
SELECT lo_get(photo::oid), lo_get(resume::oid) FROM employee;                        -- select large object as binary data
Line 1,051: Line 1,051:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- example: b
-- example: b
SELECT resume FROM profile;                                  -- oid
SELECT resume FROM profile;                                  -- oid
Line 1,059: Line 1,059:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- example: c
-- example: c
SELECT convert_from(lo_get(resume::oid), 'UTF8') FROM profile;
SELECT convert_from(lo_get(resume::oid), 'UTF8') FROM profile;
Line 1,069: Line 1,069:
----
----
===Insert===
===Insert===
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- right way to insert in insert
-- right way to insert in insert
INSERT INTO employee(id, resume, name) VALUES(
INSERT INTO employee(id, resume, name) VALUES(
Line 1,079: Line 1,079:


===Select===
===Select===
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- right way to select in select
-- right way to select in select
SELECT
SELECT
Line 1,091: Line 1,091:


===Update===
===Update===
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- right way to create in update
-- right way to create in update
-- wrong way to update in update
-- wrong way to update in update
Line 1,108: Line 1,108:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- right way to update in update
-- right way to update in update
-- SELECT oid FROM pg_largeobject_metadata;
-- SELECT oid FROM pg_largeobject_metadata;
Line 1,136: Line 1,136:


===Delete===
===Delete===
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- wrong way to unlink in delete
-- wrong way to unlink in delete
-- SELECT oid FROM pg_largeobject_metadata;
-- SELECT oid FROM pg_largeobject_metadata;
Line 1,149: Line 1,149:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- right way to unlink in delete
-- right way to unlink in delete
-- SELECT oid FROM pg_largeobject_metadata;
-- SELECT oid FROM pg_largeobject_metadata;
Line 1,174: Line 1,174:


===Alter===
===Alter===
<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
ALTER TABLE employee
ALTER TABLE employee
--  ALTER COLUMN photo TYPE bytea USING lo_get(photo::oid);
--  ALTER COLUMN photo TYPE bytea USING lo_get(photo::oid);
Line 1,235: Line 1,235:
  sed -i -e 's|^-- ||g' src/main/resources/db/migration/*.sql
  sed -i -e 's|^-- ||g' src/main/resources/db/migration/*.sql


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- psql
-- psql
CREATE DATABASE chorke_init_pro;
CREATE DATABASE chorke_init_pro;
Line 1,243: Line 1,243:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
-- revoke public connect from specific database after creation
-- revoke public connect from specific database after creation
REVOKE CONNECT ON DATABASE academia_ebis_dev FROM PUBLIC;
REVOKE CONNECT ON DATABASE academia_ebis_dev FROM PUBLIC;
Line 1,250: Line 1,250:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT
SELECT
     con.conname,
     con.conname,
Line 1,264: Line 1,264:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT
SELECT
     n.nspname AS "schema",
     n.nspname AS "schema",
Line 1,290: Line 1,290:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT d.datname                                      AS "db_name",
SELECT d.datname                                      AS "db_name",
     pg_catalog.pg_get_userbyid(d.datdba)              AS "db_owner",
     pg_catalog.pg_get_userbyid(d.datdba)              AS "db_owner",
Line 1,309: Line 1,309:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
SELECT
SELECT
     usesysid  AS "user_id",
     usesysid  AS "user_id",
Line 1,318: Line 1,318:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
DO
DO
$BODY$
$BODY$
Line 1,335: Line 1,335:
</syntaxhighlight>
</syntaxhighlight>


<syntaxhighlight lang="sql">
<syntaxhighlight style='margin:3px 0' lang='sql'>
cat << EOF | psql -U postgres
cat << EOF | psql -U postgres
DROP DATABASE IF EXISTS academia_ebis_staging;
DROP DATABASE IF EXISTS academia_ebis_staging;
Line 1,349: Line 1,349:


==Allow Remote==
==Allow Remote==
{|
{|class='wikitable mw-collapsible' style='width:100%;margin:3px 0'
| valign="top" |
!scope='col' style='text-align:left' colspan='2'|
apt install -y iputils-ping telnet dnsutils
Docker » Compose » Manage
sudo systemctl status ufw
sudo ufw status verbose
 
| valign="top" |
sudo ufw status numbered
sudo ufw allow 5432/tcp
sudo ufw enable
 
|-
|-
| colspan="2" |
|valign='top' style='width:50%'|
----
<syntaxhighlight style='margin:3px 0' lang='bash'>
apt install -y iputils-ping telnet dnsutils
sudo systemctl status ufw
sudo ufw status verbose
</syntaxhighlight>
|valign='top' style='width:50%'|
<syntaxhighlight style='margin:3px 0' lang='bash'>
sudo ufw status numbered
sudo ufw allow 5432/tcp
sudo ufw enable
</syntaxhighlight>
|-
|-
| valign="bottom" |
|valign='top'|
nano /etc/postgresql/14/main/postgresql.conf
<syntaxhighlight style='margin:3px 0' lang='bash'>
nano /etc/postgresql/16/main/postgresql.conf
nano /etc/postgresql/14/main/postgresql.conf
 
nano /etc/postgresql/16/main/postgresql.conf
| valign="top" |
</syntaxhighlight>
nano /etc/postgresql/14/main/pg_hba.conf
|valign='top'|
nano /etc/postgresql/16/main/pg_hba.conf
<syntaxhighlight style='margin:3px 0' lang='bash'>
 
nano /etc/postgresql/14/main/pg_hba.conf
|-
nano /etc/postgresql/16/main/pg_hba.conf
| colspan="2" |
</syntaxhighlight>
----
|-
|-
| valign="bottom" |
|valign='bottom'|
<syntaxhighlight lang="ini" start="60" line>
<syntaxhighlight style='margin:3px 0' lang='ini' start='60' line>
listen_addresses = '*'                  # what IP address(es) to listen on;
listen_addresses = '*'                  # what IP address(es) to listen on;
                                         # comma-separated list of addresses;
                                         # comma-separated list of addresses;
Line 1,385: Line 1,386:
</syntaxhighlight>
</syntaxhighlight>


| valign="top" |
|valign='top'|
<syntaxhighlight lang="ini" start="96" line>
<syntaxhighlight style='margin:3px 0' lang='ini' start='96' line>
# IPv4 local connections:
# IPv4 local connections:
host    all            all            127.0.0.1/32            scram-sha-256
host    all            all            127.0.0.1/32            scram-sha-256
Line 1,396: Line 1,397:


==Spread Sheet==
==Spread Sheet==
<syntaxhighlight lang="python">
{|class='wikitable mw-collapsible' style='width:100%;margin:3px 0'
!scope='col' style='text-align:left'|
Spread Sheet
|-
|valign='top'|
<syntaxhighlight style='margin:3px 0' lang='python'>
=CONCAT("(LOCALTIMESTAMP, 'religion', 'chorke.org', (SELECT (COALESCE(MAX(id), 0) + 1) FROM grouped_collection), '",  SUBSTITUTE(A2 , """" ,  ""), "', '", SUBSTITUTE(A2 , """" ,  ""),  "', '{""state"": """ , SUBSTITUTE(B2 , "'" , "''")  , """, ""city"": """ , SUBSTITUTE(C2 , "'" , "''") , """}'),")
=CONCAT("(LOCALTIMESTAMP, 'religion', 'chorke.org', (SELECT (COALESCE(MAX(id), 0) + 1) FROM grouped_collection), '",  SUBSTITUTE(A2 , """" ,  ""), "', '", SUBSTITUTE(A2 , """" ,  ""),  "', '{""state"": """ , SUBSTITUTE(B2 , "'" , "''")  , """, ""city"": """ , SUBSTITUTE(C2 , "'" , "''") , """}'),")


Line 1,409: Line 1,415:
=CONCAT("[""",  C27, """, """, B27, "",""", {}, " , A27, "],")
=CONCAT("[""",  C27, """, """, B27, "",""", {}, " , A27, "],")
</syntaxhighlight>
</syntaxhighlight>
|}


==Docker PSQL==
==Docker PSQL==
{|
{|class='wikitable mw-collapsible' style='width:100%;margin:3px 0'
| valign="top" |
!scope='col' style='text-align:left' colspan='3'|
docker run -d --name postgres\
Docker PSQL
  -e POSTGRES_PASSWORD=sadaqah\
|-
  -v ./init.sql:/docker-entrypoint-initdb.d/init.sql\
|valign='top' style='width:33%'|
  -p 127.0.0.1:5433:5432\
<syntaxhighlight style='margin:3px 0' lang='bash'>
  postgres:12.14
docker run -d --name postgres\
-e POSTGRES_PASSWORD=sadaqah\
-v ./init.sql:/docker-entrypoint-initdb.d/init.sql\
-p 127.0.0.1:5433:5432\
postgres:12.14
</syntaxhighlight>


| valign="top" |
|valign='top' style='width:34%'|
docker exec -it postgres bash
<syntaxhighlight style='margin:3px 0' lang='bash'>
psql -U postgres
docker exec -it postgres bash
psql -U postgres
   
   
docker stop postgres
docker stop postgres
docker rm postgres
docker rm postgres
</syntaxhighlight>


| valign="top" |
|valign='top' style='width:33%'|
docker exec -it postgres\
<syntaxhighlight style='margin:3px 0' lang='bash'>
  psql -U postgres
docker exec -it postgres\
psql -U postgres
   
   
docker ps -a
docker ps -a
docker rm postgres -f
docker rm postgres -f
</syntaxhighlight>
|}
|}


==References ==
==References==
{|
{|class='wikitable mw-collapsible' style='width:100%;margin:3px 0'
| valign="top" |
!scope='col' style='text-align:left' colspan='3'|
References
|-
|valign='top' style='width:33%'|
* [https://dba.stackexchange.com/questions/17790/ PostgreSQL users can access all databases without any grants]
* [https://dba.stackexchange.com/questions/17790/ PostgreSQL users can access all databases without any grants]
* [https://medium.com/coding-blocks/creating-user-database-and-adding-access-on-postgresql-8bfcd2f4a91e Creating user, database and adding access on PostgreSQL]
* [https://medium.com/coding-blocks/creating-user-database-and-adding-access-on-postgresql-8bfcd2f4a91e Creating user, database and adding access on PostgreSQL]
Line 1,448: Line 1,467:
* [https://www.postgresql.org/docs/10/static/plpgsql-trigger.html PL/PgSQL Trigger 10]
* [https://www.postgresql.org/docs/10/static/plpgsql-trigger.html PL/PgSQL Trigger 10]


| valign="top" |
|valign='top' style='width:34%'|
* [https://stackoverflow.com/questions/24573027/ Custom Thread-safe Sequence by Hibernate]
* [https://stackoverflow.com/questions/24573027/ Custom Thread-safe Sequence by Hibernate]
* [https://www.postgresql.org/docs/current/functions-datetime.html Date/Time Functions and Operators]
* [https://www.postgresql.org/docs/current/functions-datetime.html Date/Time Functions and Operators]
Line 1,460: Line 1,479:
* [https://www.postgresql.org/docs/current/datatype.html Data Types]
* [https://www.postgresql.org/docs/current/datatype.html Data Types]


| valign="top" |
|valign='top' style='width:33%'|
* [https://stackoverflow.com/questions/8833679/ <code>PL/pgSQL</code> Create dynamic table from string]
* [https://stackoverflow.com/questions/8833679/ <code>PL/pgSQL</code> Create dynamic table from string]
* [https://www.postgresql.org/docs/current/plpgsql.html <code>PL/pgSQL</code> SQL Procedural Language]
* [https://www.postgresql.org/docs/current/plpgsql.html <code>PL/pgSQL</code> SQL Procedural Language]
Line 1,468: Line 1,487:
* [https://www.postgresql.org/docs/current/sql-dropfunction.html <code>PL/pgSQL</code> Drop Function]
* [https://www.postgresql.org/docs/current/sql-dropfunction.html <code>PL/pgSQL</code> Drop Function]
* [https://www.postgresql.org/docs/current/dblink.html <code>PL/pgSQL</code> DBLink]
* [https://www.postgresql.org/docs/current/dblink.html <code>PL/pgSQL</code> DBLink]
|-
|-
| colspan="3" |
|valign='top'|
----
|-
| valign="top" |
* [https://www.baeldung.com/spring-data-jpa-stored-procedures Calling Stored Procedures from Spring Data JPA]
* [https://www.baeldung.com/spring-data-jpa-stored-procedures Calling Stored Procedures from Spring Data JPA]
* [https://dba.stackexchange.com/questions/44956/ Explanation of cascade <code>ON DELETE/UPDATE</code>]
* [https://dba.stackexchange.com/questions/44956/ Explanation of cascade <code>ON DELETE/UPDATE</code>]
Line 1,485: Line 1,500:
* [https://www.postgresql.org/docs/current/backup-dump.html PostgreSQL Dump]
* [https://www.postgresql.org/docs/current/backup-dump.html PostgreSQL Dump]


| valign="top" |
|valign='top'|
* [https://stackoverflow.com/questions/38883233/ PostgreSQL <code>jsonb_set</code> multiple keys update]
* [https://stackoverflow.com/questions/38883233/ PostgreSQL <code>jsonb_set</code> multiple keys update]
* [https://stackoverflow.com/questions/2072776/ Remove a item from the array-type Column]
* [https://stackoverflow.com/questions/2072776/ Remove a item from the array-type Column]
Line 1,497: Line 1,512:
* [https://www.postgresql.org/docs/current/functions-json.html JSON Functions and Operators]
* [https://www.postgresql.org/docs/current/functions-json.html JSON Functions and Operators]


| valign="top" |
|valign='top'|
* [https://stackoverflow.com/questions/57168773/ Does JPA delete also remove large objects of Blob fields]
* [https://stackoverflow.com/questions/57168773/ Does JPA delete also remove large objects of Blob fields]
* [https://stackoverflow.com/questions/22654170/ Explanation of JSONB introduced by PostgreSQL]
* [https://stackoverflow.com/questions/22654170/ Explanation of JSONB introduced by PostgreSQL]
Line 1,508: Line 1,523:
* [https://www.rightbrainnetworks.com/2010/03/02/base36-conversion-in-postgresql/ Base36 Conversion in PostgreSQL]
* [https://www.rightbrainnetworks.com/2010/03/02/base36-conversion-in-postgresql/ Base36 Conversion in PostgreSQL]
* [https://stackoverflow.com/questions/23490965/ Remove attribute from JSON]
* [https://stackoverflow.com/questions/23490965/ Remove attribute from JSON]
|-
|-
| colspan="3" |
|valign='top'|
----
|-
| valign="top" |
* [https://scalegrid.io/blog/using-jsonb-in-postgresql-how-to-effectively-store-index-json-data-in-postgresql/ PostgreSQL » Effectively Store & Index JSON Data]
* [https://scalegrid.io/blog/using-jsonb-in-postgresql-how-to-effectively-store-index-json-data-in-postgresql/ PostgreSQL » Effectively Store & Index JSON Data]
* [https://medium.com/@mnu/update-a-postgresql-table-using-a-with-query-648eefaae2a6 PostgreSQL » Update a table using a WITH query]
* [https://medium.com/@mnu/update-a-postgresql-table-using-a-with-query-648eefaae2a6 PostgreSQL » Update a table using a WITH query]
Line 1,525: Line 1,536:
* [https://www.postgresql.org/docs/14/queries-with.html PostgreSQL » WITH Queries]
* [https://www.postgresql.org/docs/14/queries-with.html PostgreSQL » WITH Queries]


| valign="top" |
|valign='top'|
* [https://stackoverflow.com/questions/50364904/ PostgreSQL » <code>WITH</code> clause with <code>INSERT</code> statement]
* [https://stackoverflow.com/questions/50364904/ PostgreSQL » <code>WITH</code> clause with <code>INSERT</code> statement]
* [https://hasura.io/blog/top-psql-commands-and-flags-you-need-to-know-postgresql PostgreSQL » PSQL Short Commands and Flags]
* [https://hasura.io/blog/top-psql-commands-and-flags-you-need-to-know-postgresql PostgreSQL » PSQL Short Commands and Flags]
Line 1,533: Line 1,544:
* [[Dockerized PostgreSQL|PostgreSQL » Dockerized]]
* [[Dockerized PostgreSQL|PostgreSQL » Dockerized]]


| valign="top" |
|valign='top'|
 
|-
|-
| colspan="3" |
|valign='top'|
----
|-
| valign="top" |
* [[Convention for Database Tables]]
* [[Convention for Database Tables]]
* [[Base36 Encoding and Decoding]]
* [[Base36 Encoding and Decoding]]
Line 1,551: Line 1,558:
* [[JPA]]
* [[JPA]]


| valign="top" |
|valign='top'|
* [https://stackoverflow.com/questions/533256/ PostgreSQL » Aggregate multiple rows as array]
* [https://stackoverflow.com/questions/533256/ PostgreSQL » Aggregate multiple rows as array]
* [https://stackoverflow.com/questions/39620317 PostgreSQL » JSON column with H2]
* [https://stackoverflow.com/questions/39620317 PostgreSQL » JSON column with H2]
Line 1,563: Line 1,570:
* [[Jasypt]]
* [[Jasypt]]


| valign="top" |
|valign='top'|
* [https://www.enterprisedb.com/blog/how-configure-oauth-20-pgadmin-4 PostgreSQL » PgAdmin » OAuth 2.0 » Configure]
* [https://www.enterprisedb.com/blog/how-configure-oauth-20-pgadmin-4 PostgreSQL » PgAdmin » OAuth 2.0 » Configure]
* [https://www.pgadmin.org/docs/pgadmin4/6.21/oauth2.html PostgreSQL » PgAdmin » OAuth 2.0 » Enabling]
* [https://www.pgadmin.org/docs/pgadmin4/6.21/oauth2.html PostgreSQL » PgAdmin » OAuth 2.0 » Enabling]

Revision as of 12:23, 15 January 2026

Login to PSQL

export PGPASSWORD='p@55w0rd'

# deploy@cdn.chorke.org:~ #
psql -U postgres
#psql (10.15 (Ubuntu 10.15-0ubuntu0.18.04.1))
#Type "help" for help.

# root@cdn.chorke.org:~ #
su - postgres
# Last login: Tue Jan 01 12:00:00 MYT 2013 on pts/0
# -bash-4.2$

psql
# psql (9.6.8)
# Type "help" for help.
# 
# postgres=#

Connection String


parameters based psql
psql -U postgres
psql -U academia postgres
psql -U academia -h rds.vpc.chorke.org postgres
psql -U academia -h rds.vpc.chorke.org -p 5432 postgres
PGPASSWORD='sadaqah' psql -U academia -h rds.vpc.chorke.org -p 5432 postgres
connection string based psql:
psql 'postgres://postgres:@:/'
psql 'postgres://academia:@:/postgres'
psql 'postgres://academia:@rds.vpc.chorke.org:/postgres'
psql 'postgres://academia:@rds.vpc.chorke.org:5432/postgres'
psql 'postgres://academia:sadaqah@rds.vpc.chorke.org:5432/postgres'

Login Error

# root@cdn.chorke.org:~ #
su - postgres psql
# Last login: Tue Jan 01 12:00:00 MYT 2013 on pts/0
# /bin/createuser: /bin/createuser: cannot execute binary file
# root@cdn.chorke.org:~ #
su postgres
psql -p 5433
# psql (12.9 (Ubuntu 12.9-0ubuntu0.20.04.1))
# deploy@cdn.chorke.org:~ #
sudo su postgres
psql -p 5433
# psql (12.9 (Ubuntu 12.9-0ubuntu0.20.04.1))

Cluster Login

pg_lsclusters
sudo su - postgres
psql -p 5433
psql -U postgres -p 5433
GRANT ALL ON tmp_coll TO academia;
GRANT ALL ON test_json TO academia;
-- psql -V
SELECT version();

Creating User

# root@cdn.chorke.org:~ #
su - postgres
# Last login: Tue Jan 01 12:00:00 MYT 2013 on pts/0
# -bash-4.2$
createuser root
# createuser: creation of new role failed: ERROR:  role "root" already exists
# ALTER USER root WITH SUPERUSER;

Creating DB

Until a root database is created, login always shows the error could not change directory to "/root": Permission denied. To overcome this error, create a root database — it is needed only to avoid the error when logging in as the root user.

# root@cdn.chorke.org:~ #
su - postgres
# Last login: Tue Jan 01 12:00:00 MYT 2013 on pts/0
# -bash-4.2$
createdb root

Similarly, you can create a database named after your frequently used OS user. In that case no password is needed to log in; authentication is handled by the OS. That means you can log in to your PostgreSQL user using OS authentication.

Grant Privileges

# postgres=#
grant all privileges on database root to root;
# GRANT

# postgres=#
grant all privileges on database postgres to root;
# GRANT

Extensions

ENABLE_DB_LINK=$(cat <<-DDL
CREATE EXTENSION IF NOT EXISTS dblink;
CREATE EXTENSION IF NOT EXISTS dblink SCHEMA extensions;
SELECT * FROM pg_available_extensions WHERE name = 'dblink';
DDL
)
echo "${ENABLE_DB_LINK}" | psql -p 5432 -U ${USER}
echo "${ENABLE_DB_LINK}" | psql -p 5432 -U ${USER} academia_data_staging
ENABLE_PG_CRYPTO=$(cat <<-DDL
CREATE EXTENSION IF NOT EXISTS pgcrypto;
CREATE EXTENSION IF NOT EXISTS pgcrypto SCHEMA extensions;
SELECT * FROM pg_available_extensions WHERE name = 'pgcrypto';
DDL
)
echo "${ENABLE_PG_CRYPTO}" | psql -p 5432 -U ${USER}
echo "${ENABLE_PG_CRYPTO}" | psql -p 5432 -U ${USER} academia_data_staging

sudo apt install postgresql-16-pgvector

ENABLE_PG_VECTOR=$(cat <<-DDL
CREATE EXTENSION IF NOT EXISTS vector;
CREATE EXTENSION IF NOT EXISTS vector SCHEMA extensions;
SELECT * FROM pg_available_extensions WHERE name = 'vector';
DDL
)
echo "${ENABLE_PG_VECTOR}" | psql -p 5432 -U ${USER}
echo "${ENABLE_PG_VECTOR}" | psql -p 5432 -U ${USER} academia_data_staging

Logout PSQL

# postgres=#
\q
# -bash-4.2$

Meta Data

-- show databases
SELECT datname FROM pg_database;             -- \l+

-- show sequence
SELECT c.relname FROM pg_class c             -- \ds+
WHERE c.relkind = 'S' ORDER BY c.relname;

-- show tables
SELECT * FROM pg_catalog.pg_tables
WHERE schemaname NOT IN ('pg_catalog','information_schema');

-- describe tables
SELECT table_name, column_name, data_type    -- \d+ my_table
FROM information_schema.columns WHERE table_name = 'my_table';

-- find tables with similar field name
SELECT table_name, column_name, data_type 
FROM information_schema.columns WHERE column_name LIKE '%slug%';
SELECT pg_database_size('my_database');                     -- \l+
SELECT pg_size_pretty(pg_database_size('my_database'));     -- \l+


SELECT 
    db.datname AS db_name,
    pg_size_pretty(pg_database_size(db.datname)) AS db_size
FROM pg_database db ORDER BY pg_database_size(db.datname) DESC;



SELECT pg_size_pretty(pg_total_relation_size('my_table'));  -- \dt+
SELECT                                                      -- \dt+
    table_schema, table_name,
    pg_size_pretty(pg_total_relation_size(quote_ident(table_name))) AS table_size
FROM information_schema.tables
WHERE table_schema = 'public'
ORDER BY 2 ASC;

XML Query

SELECT
    xml_is_well_formed('<>')         AS "<>",                  -- false
    xml_is_well_formed('<name >')    AS "<name >",             -- false
    xml_is_well_formed('<name />')   AS "<name />",            -- true
    xml_is_well_formed('academia')   AS "academia",            -- true
    xml_is_well_formed('{academia')  AS "{academia",           -- true
    xml_is_well_formed('{academia}') AS "{academia}";          -- true
SELECT
    xml_is_well_formed_content('<>')         AS "<>",          -- false
    xml_is_well_formed_content('<name >')    AS "<name >",     -- false
    xml_is_well_formed_content('<name />')   AS "<name />",    -- true
    xml_is_well_formed_content('academia')   AS "academia",    -- true
    xml_is_well_formed_content('{academia')  AS "{academia",   -- true
    xml_is_well_formed_content('{academia}') AS "{academia}";  -- true

SELECT
    xml_is_well_formed_document('<>')         AS "<>",         -- false
    xml_is_well_formed_document('<name >')    AS "<name >",    -- false
    xml_is_well_formed_document('<name />')   AS "<name />",   -- true
    xml_is_well_formed_document('academia')   AS "academia",   -- false
    xml_is_well_formed_document('{academia')  AS "{academia",  -- false
    xml_is_well_formed_document('{academia}') AS "{academia}"; -- false

WITH xpath_node AS (
    SELECT
        cast(raw_xpath_node->>0 AS integer) "sn",
        cast(raw_xpath_node->>1 AS varchar) "node"
    FROM jsonb_array_elements('[
        [0, ""],
        [1, "bpmn:definitions"],
        [2, "bpmn:process"],
        [3, "bpmn:serviceTask"],
        [4, "bpmn:extensionElements"],
        [5, "camunda:connector"],
        [6, "camunda:inputOutput"],
        [7, "camunda:inputParameter"]
    ]'::jsonb) AS raw_xpath_node
)
SELECT string_agg(node, '/') AS "path" FROM xpath_node;
WITH workflow_namespace AS (
    SELECT
        cast(raw_workflow_namespace->>0 AS varchar) "prefix",
        cast(raw_workflow_namespace->>1 AS varchar) "namespace"
    FROM jsonb_array_elements('[
        ["bpmn",    "http://www.omg.org/spec/BPMN/20100524/MODEL"],
        ["bpmndi",  "http://www.omg.org/spec/BPMN/20100524/DI"],
        ["dc",      "http://www.omg.org/spec/DD/20100524/DC"],
        ["di",      "http://www.omg.org/spec/DD/20100524/DI"],
        ["modeler", "http://camunda.org/schema/modeler/1.0"],
        ["camunda", "http://camunda.org/schema/1.0/bpmn"]
    ]'::jsonb) AS raw_workflow_namespace
)
-- SELECT * FROM workflow_namespace;
-- SELECT array[prefix, namespace] FROM workflow_namespace;
SELECT array_agg(array[prefix, namespace]) FROM workflow_namespace;

WITH xpath_ns AS (
    SELECT
        cast(raw_xpath_ns->>0 AS varchar) "alias",
        cast(raw_xpath_ns->>1 AS varchar) "namespace"
    FROM jsonb_array_elements('[
        ["bpmn",    "http://www.omg.org/spec/BPMN/20100524/MODEL"],
        ["bpmndi",  "http://www.omg.org/spec/BPMN/20100524/DI"],
        ["dc",      "http://www.omg.org/spec/DD/20100524/DC"],
        ["di",      "http://www.omg.org/spec/DD/20100524/DI"],
        ["modeler", "http://camunda.org/schema/modeler/1.0"],
        ["camunda", "http://camunda.org/schema/1.0/bpmn"]
    ]'::jsonb) AS raw_xpath_ns
),
xpath_nss AS (
    SELECT
        array_agg(array[ns.alias, ns.namespace])
    FROM xpath_ns ns
),
stored_dataflow AS (
    SELECT
        id_ AS "id", rev_ AS "revision",
        generated_ AS "is_generated", tenant_id_ AS "tenant_id",
        type_ AS "type", create_time_ AS "created_on", root_proc_inst_id_ AS "root_proc_inst_id",
        removal_time_ AS "deleted_on", name_ AS "name", convert_from(bytes_, 'UTF8') AS "dataflow"
    FROM act_ge_bytearray
),
stored_workflow AS (
    SELECT
        sd.id, sd.name, sd.type, sd.revision, sd.is_generated,
        sd.created_on,  sd.deleted_on,  sd.root_proc_inst_id,
        CAST(sd.dataflow AS xml)  AS "workflow", sd.dataflow,
        (SELECT * FROM xpath_nss) AS "ns"
    FROM stored_dataflow sd
    WHERE xml_is_well_formed_document(sd.dataflow)
    ORDER BY sd.name ASC, sd.created_on DESC
)
SELECT
    sw.name,
    (xpath('count(/bpmn:definitions/bpmn:process)', sw.workflow, sw.ns))[1]::text::int,
    (xpath('/bpmn:definitions/bpmn:process', sw.workflow, sw.ns))[1], dataflow
FROM stored_workflow sw;

Array Query

SELECT dependencies||'"Running Fiscal Year"'
FROM scope WHERE name = 'Fiscal Year Config';
SELECT * FROM user
WHERE owner_ref LIKE ANY (ARRAY[
   ',academia,chorke.org,shahed.biz,'
]) ORDER BY id ASC;
SELECT * FROM user
WHERE owner_ref LIKE ALL (ARRAY[
   ',academia,chorke.org,shahed.biz,'
]) ORDER BY id ASC;

JSON Query

{
    "type": "Reseller",
    "commission": {
        "type": "Percentage",
        "value": "25"
    }
}
SELECT extended_properties ->'commission'->'value'
FROM seller WHERE extended_properties ->>'type' = 'Reseller';
--
UPDATE seller
SET extended_properties = jsonb_set(extended_properties, '{type}', '"Retailer"')
WHERE id = 1000;
--
UPDATE seller
SET extended_properties = extended_properties || jsonb_build_object('code', code)
WHERE id = 1000;
--
UPDATE seller
SET extended_properties = jsonb_set(extended_properties, '{commission, type}', '"Bonus"')
WHERE id = 1000 AND extended_properties ->> 'type' = 'Reseller';

JSON

SELECT id, name FROM json_to_record (
    '{"id":1,"name":"Academia"}'::json
) AS concern(id int, name text);
SELECT * FROM json_array_elements_text('[
    {"id":1,"name":"Academia"},
    {"id":2,"name":"Agronomy"},
    {"id":3,"name":"Software"}
]'::json);
SELECT id, name FROM json_array_elements_text('[
    {"id": 1, "name": "Academia"},
    {"id": 2, "name": "Agronomy"},
    {"id": 3, "name": "Software"}
]'::json) AS concerns_text,
json_to_record(concerns_text::json)
AS concerns_json(id int, name text);
SELECT id, name FROM json_array_elements('[
    {"id": 1, "name": "Academia"},
    {"id": 2, "name": "Agronomy"},
    {"id": 3, "name": "Software"}
]'::json) AS concerns_array,
json_to_record(concerns_array)
AS concerns_rows(id int, name text);
SELECT id, name FROM json_to_recordset('{"data": [
    {"concerns": {"id": 1, "name": "Academia"}},
    {"concerns": {"id": 2, "name": "Agronomy"}},
    {"concerns": {"id": 3, "name": "Software"}}
]}'::json -> 'data') as r(concerns json),
json_to_record(concerns) AS concern(id int, name text);

JSONB

SELECT id, name FROM jsonb_to_record (
    '{"id":1,"name":"Academia"}'::jsonb
) AS concern(id int, name text);
SELECT * FROM jsonb_array_elements_text('[
    {"id":1,"name":"Academia"},
    {"id":2,"name":"Agronomy"},
    {"id":3,"name":"Software"}
]'::jsonb);
SELECT id, name FROM jsonb_array_elements_text('[
    {"id": 1, "name": "Academia"},
    {"id": 2, "name": "Agronomy"},
    {"id": 3, "name": "Software"}
]'::jsonb) AS concerns_text,
jsonb_to_record(concerns_text::jsonb)
AS concerns_json(id int, name text);
SELECT id, name FROM jsonb_array_elements('[
    {"id": 1, "name": "Academia"},
    {"id": 2, "name": "Agronomy"},
    {"id": 3, "name": "Software"}
]'::jsonb) AS concerns_array,
jsonb_to_record(concerns_array)
AS concerns_rows(id int, name text);
SELECT id, name FROM jsonb_to_recordset('{"data": [
    {"concerns": {"id": 1, "name": "Academia"}},
    {"concerns": {"id": 2, "name": "Agronomy"}},
    {"concerns": {"id": 3, "name": "Software"}}
]}'::jsonb -> 'data') as r(concerns jsonb),
jsonb_to_record(concerns) AS concern(id int, name text);

Result

 id |   name   
----+----------
  1 | Academia
             value             
-------------------------------
 {"id": 1, "name": "Academia"}
 {"id": 2, "name": "Agronomy"}
 {"id": 3, "name": "Software"}
 id |   name   
----+----------
  1 | Academia
  2 | Agronomy
  3 | Software
(3 rows)

 id |   name   
----+----------
  1 | Academia
  2 | Agronomy
  3 | Software
(3 rows)

 id |   name   
----+----------
  1 | Academia
  2 | Agronomy
  3 | Software
(3 rows)

Insert

-- query » group : religion
-- -----------------------------------------------------------------------------
-- SELECT MAX(id) FROM grouped_collection;
-- SELECT *       FROM grouped_collection WHERE group_name = 'religion';
-- DELETE         FROM grouped_collection WHERE group_name = 'religion';
-- -----------------------------------------------------------------------------
WITH mixed_grouped_collection AS (
    SELECT
        cast('chorke.org' AS varchar) "domain",
        cast('religion'   AS varchar) "group_name",
        cast(raw_grouped_collection->>0 AS varchar) "code",
        cast(raw_grouped_collection->>1 AS varchar) "name",
        cast(raw_grouped_collection->>2 AS jsonb  ) "extended_properties",
        cast(raw_grouped_collection->>3 AS numeric) "order_coll"
    FROM jsonb_array_elements('[
        ["I", "Islam",     {}, 1],
        ["B", "Buddhist",  {}, 2],
        ["H", "Hindu",     {}, 3],
        ["C", "Christian", {}, 4],
        ["O", "Others",    {}, 5]
    ]'::jsonb) AS raw_grouped_collection
)
, new_grouped_collection AS (
    SELECT
        ROW_NUMBER() OVER() "id", LOCALTIMESTAMP "created_on",
        mgc.domain,  mgc.code, mgc.name, mgc.group_name,
        mgc.extended_properties, mgc.order_coll
    FROM mixed_grouped_collection mgc
    --  prevent old duplicate
    WHERE NOT EXISTS (
        SELECT id FROM grouped_collection
        WHERE (code, group_name, domain) = (mgc.code, mgc.group_name, mgc.domain)
    )
)
SELECT ngc.* FROM new_grouped_collection ngc;
-- INSERT INTO grouped_collection (id, created_on, domain, code, name, group_name, extended_properties, order_coll)
-- SELECT (SELECT (COALESCE(MAX(id), 0) + ngc.id) FROM grouped_collection),
--     ngc.created_on, ngc.domain, ngc.code, ngc.name, ngc.group_name,
--     ngc.extended_properties, ngc.order_coll
-- FROM new_grouped_collection ngc
-- --  prevent new duplicate
-- WHERE NOT EXISTS (
--     SELECT id FROM grouped_collection
--     WHERE (code, group_name, domain) = (ngc.code, ngc.group_name, ngc.domain)
-- )
-- ORDER BY (ngc.order_coll, ngc.id) ASC;

Delete Join

-- USING is not ANSI standard and is not supported by some other RDBMS;
-- it's better to use a subquery rather than USING for the join
DELETE FROM product p USING category c
WHERE p.category_id = c.id AND
c.name = 'Grocery';

Sequence

CREATE SEQUENCE IF NOT EXISTS public.policy_1000
    INCREMENT 1
    START     46656   -- 1,000
    MINVALUE  46656   -- 1,000
    MAXVALUE  1679615 -- Z,ZZZ
    CACHE     10;

ALTER SEQUENCE public.policy_1000 OWNER TO chorke_init_pro;
COMMENT ON SEQUENCE public.policy_1000 IS 'Academia Policy Sequence';

SELECT CURRVAL('policy_1000');
SELECT SETVAL('policy_1000', 0);
ALTER  SEQUENCE IF EXISTS policy_1000 RESTART 1;
SELECT NEXTVAL('policy_1000')::int AS policy_id;

GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO chorke_init_pro;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE, SELECT ON SEQUENCES TO chorke_init_pro;

Echo/Print

-- Demonstrate printing a value from PL/pgSQL: concatenate two jsonb arrays
-- with || and emit the result via RAISE NOTICE (the PL/pgSQL "echo").
DO $BODY$
DECLARE
    p_jsonb jsonb;  -- holds the concatenated jsonb array
BEGIN
--  SELECT INTO p_jsonb '["", "template","{templateType}","{productSlug}"]'::jsonb || '["{filePath}"]'::jsonb;
    -- direct assignment (:=) is the lighter alternative to SELECT INTO above
    p_jsonb := '["", "template","{templateType}","{productSlug}"]'::jsonb || '["{filePath}"]'::jsonb;
    RAISE NOTICE '%', p_jsonb;
END;
$BODY$;

Execute DDL

Insert Using Loop

-- Seed five rows into grouped_collection for the 'occupation_class' group,
-- assigning ids manually by continuing from the current MAX(id).
-- NOTE(review): MAX(id)+n id allocation is race-prone under concurrent
-- writers — confirm this only runs as a one-off seed script.
DO $BODY$
DECLARE
    p_id         integer;                          -- current MAX(id), loop offsets from it
    p_domain     varchar:= 'chorke.org';
    p_group_name varchar:= 'occupation_class';
BEGIN
  SELECT INTO p_id (COALESCE(MAX(id), 0)) FROM grouped_collection;
    -- p_code doubles as both the code and the name of each seeded row
    FOR p_code IN 1..5 LOOP
        INSERT INTO grouped_collection (id, created_on, domain, code, name, group_name, extended_properties)
        VALUES ((p_id + p_code), LOCALTIMESTAMP, p_domain, p_code, p_code, p_group_name, '{}');
    END LOOP;
END;
$BODY$;

Anonymous Blocks

-- Drop the (single) 'uk_%' unique constraint on public.fiscal_year, if any,
-- by looking its generated name up in the catalogs and EXECUTE-ing the DDL.
DO
$BODY$
DECLARE
    fiscal_year_ukc character varying;                                   -- constraint name found, NULL when absent
    fiscal_drop_ukc character varying := 'ALTER TABLE fiscal_year';      -- DDL prefix, completed below
BEGIN
    -- Find the unique-key constraint name on public.fiscal_year.
    -- NOTE(review): assumes at most one 'uk_%' match — SELECT INTO keeps
    -- only the first row if there are several; confirm naming convention.
    SELECT
        INTO fiscal_year_ukc con.conname 
    FROM  pg_catalog.pg_constraint     con
    INNER JOIN pg_catalog.pg_class     rel ON rel.oid = con.conrelid
    INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace
    WHERE
        nsp.nspname = 'public'      AND
        rel.relname = 'fiscal_year' AND
        con.conname LIKE 'uk_%';
 
  IF fiscal_year_ukc IS NOT NULL THEN
    -- adjacent string literals concatenate across lines before the ||
    fiscal_drop_ukc := fiscal_drop_ukc || ' '
    'DROP CONSTRAINT ' || fiscal_year_ukc;
    
      EXECUTE fiscal_drop_ukc;
  END IF;
END
$BODY$;

Create Sequence

-- Return the next policy number for the given agent, lazily creating a
-- per-agent sequence named policy_<agent_no> on first call.
--
-- Parameters:
--   agent_no — agent identifier appended to the sequence name.
-- Returns:
--   integer in 46656 ('1000' base36) .. 1679615 ('ZZZZ' base36).
CREATE OR REPLACE FUNCTION fn_get_policy_no(IN agent_no character varying)
    RETURNS  integer
    LANGUAGE 'plpgsql'
AS
$BODY$
DECLARE
    policy_no       integer;
    sequence_name   character varying := 'policy_' || agent_no;
BEGIN
    -- Build the DDL with format()/%I so the sequence identifier is quoted:
    -- unexpected characters in agent_no can no longer break (or inject
    -- into) the generated statement, unlike plain string concatenation.
    EXECUTE format(
        'CREATE SEQUENCE IF NOT EXISTS %I '
        'INCREMENT 1       '
        'START     46656   ' -- 1,000 in base36
        'MINVALUE  46656   ' -- 1,000 in base36
        'MAXVALUE  1679615 ' -- Z,ZZZ in base36
        'CACHE     10      ', sequence_name);

    -- quote_ident() matches the %I quoting used at creation time.
    policy_no := NEXTVAL(quote_ident(sequence_name))::int;
    RETURN policy_no;
END
$BODY$;

SELECT fn_get_policy_no('0001');

Create Sequence with Comment

-- Return the next policy number for the given agent, lazily creating a
-- per-agent sequence named policy_<agent_no> and attaching agent_desc
-- as the sequence comment.
--
-- Parameters:
--   agent_no   — agent identifier appended to the sequence name.
--   agent_desc — human-readable description stored as the sequence comment.
-- Returns:
--   integer in 46656 ('1000' base36) .. 1679615 ('ZZZZ' base36).
CREATE OR REPLACE FUNCTION fn_get_policy_no(IN agent_no character varying, IN agent_desc character varying)
    RETURNS  integer
    LANGUAGE 'plpgsql'
AS
$BODY$
DECLARE
    policy_no       integer;
    sequence_name   character varying := 'policy_' || agent_no;
BEGIN
    -- %I quotes the identifier, so agent_no cannot break the generated DDL.
    EXECUTE format(
        'CREATE SEQUENCE IF NOT EXISTS %I '
        'INCREMENT 1       '
        'START     46656   ' -- 1,000 in base36
        'MINVALUE  46656   ' -- 1,000 in base36
        'MAXVALUE  1679615 ' -- Z,ZZZ in base36
        'CACHE     10      ', sequence_name);

    -- %L escapes embedded quotes. The previous '''||agent_desc||''' wrapping
    -- only surrounded the value and broke on descriptions containing a
    -- single quote (e.g. O'Brien).
    EXECUTE format('COMMENT ON SEQUENCE %I IS %L', sequence_name, agent_desc);

    policy_no := NEXTVAL(quote_ident(sequence_name))::int;
    RETURN policy_no;
END
$BODY$;

SELECT fn_get_policy_no('0001', 'Fareast Islami Life Insurance');
-- @Query(value="SELECT fn_get_policy_no(:agent_no, :agent_desc)", nativeQuery = true)
-- Integer getPolicyNo(@Param("agent_no") String agentNo, @Param("agent_desc") String agentDesc);

Create From Table Exclude Column

-- Clone the invoice table into invoice__temp, excluding the id and
-- created_on columns, by assembling a CREATE TABLE ... AS SELECT column
-- list from information_schema and EXECUTE-ing it.
DO
$BODY$
DECLARE
    create_table character varying;  -- the generated DDL statement
BEGIN
    -- STRING_AGG joins the surviving column names in ordinal order.
    SELECT INTO create_table 'CREATE TABLE IF NOT EXISTS invoice__temp AS SELECT ' || STRING_AGG(isc.column_name, ', ') || ' FROM invoice'
    FROM (SELECT * FROM information_schema.columns WHERE table_name = 'invoice' AND
    column_name NOT IN ('id', 'created_on') ORDER BY ordinal_position ASC) isc;
    EXECUTE create_table;
END
$BODY$;

Date & Time

SELECT * FROM policy 
WHERE DATE_TRUNC('day', created_on) = TO_DATE('2021-12-31', 'yyyy-mm-dd');

UPDATE policy SET deleted_on = LOCALTIMESTAMP
WHERE DATE_TRUNC('day', created_on) = TO_DATE('2021-12-31', 'yyyy-mm-dd');
SELECT EXTRACT(CENTURY FROM TIMESTAMP '2013-10-10 10:10:10');
SELECT DATE_TRUNC('year', TIMESTAMP '2013-10-10 10:10:10');
SELECT DATE_TRUNC('hour', TIMESTAMP '2013-10-10 10:10:10');
SELECT AGE(TIMESTAMP '2023-10-10', TIMESTAMP '1983-10-10');
SELECT EXTRACT(DAY FROM TIMESTAMP '2013-10-10 10:10:10');
SELECT DATE_PART('day', TIMESTAMP '2013-10-10 10:10:10');
SELECT DATE_PART('hour', INTERVAL '4 hours 3 minutes');

SELECT JUSTIFY_INTERVAL(INTERVAL '1 mon -1 hour');
SELECT JUSTIFY_HOURS(INTERVAL '27 hours');
SELECT JUSTIFY_DAYS(INTERVAL '35 days');
SELECT AGE(TIMESTAMP '1983-10-10');

SELECT CURRENT_TIMESTAMP(2)
SELECT CURRENT_TIMESTAMP;
SELECT LOCALTIMESTAMP;
SELECT CURRENT_TIME;
SELECT CURRENT_DATE;

String Format

SELECT STRING_TO_ARRAY('/my/request/path', '/') "string_array";
SELECT ARRAY_TO_JSON(STRING_TO_ARRAY('/my/request/path', '/')) "json_array";

Number Format

SELECT NULLIF(regexp_replace('CKI00109'     , '\D','','g'), '')::numeric;
SELECT NULLIF(regexp_replace('CKI0010#9####', '\D','','g'), '')::numeric;
SELECT NULLIF(regexp_replace('CKI2203000075', '\D','','g'), '')::numeric;

Restoring the Dump

export dumps
pg_dump academia > academia_20200210_1020.sql
pg_dump academia| gzip > academia_2020021.sql.gz
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity WHERE datname IN
(
    'academia'
);
DROP DATABASE IF EXISTS academia;
CREATE DATABASE academia;
restore dumps
psql academia < academia_20200210_1020.sql
gunzip -c academia_20200210_1020.sql.gz | psql academia

export table as csv

psql -U postgres
\c my_database_staging
\COPY my_table_name TO '/home/academia/Downloads/my_table_name.csv' DELIMITER ',' CSV HEADER;

OS User Auth

CREATE DATABASE academia;
CREATE USER academia WITH LOGIN;
GRANT ALL PRIVILEGES ON DATABASE academia TO academia;
-- CREATE USER academia WITH ENCRYPTED PASSWORD 'sadaqah!';
echo "os-map          academia                academia" >> $PG_DATA/pg_ident.conf
echo "host    all     all     127.0.0.1/32    ident map=os-map" >> $PG_DATA/pg_hba.conf
pg_ctl --pgdata="$PG_DATA" reload;
CREATE EXTENSION dblink;
CREATE EXTENSION dblink SCHEMA extensions;

SELECT * FROM pg_available_extensions;
SELECT * FROM pg_available_extensions WHERE name = 'dblink';

-- using pgsql block
DO
$BODY$
DECLARE
    p_db_host        varchar:= host(inet_server_addr());
    p_db_port        varchar:= inet_server_port();
    p_db_user        varchar:= CURRENT_USER;
    p_db_pass        varchar:= 'p@$$w0rd';
    p_db_name        varchar:= 'postgres';

    p_db_extn_name   varchar;
    p_db_exec_query  varchar;
    p_db_conn_format varchar:= 'hostaddr=%s port=%s dbname=%s user=%s password=%s';
    p_db_conn_string varchar:= format(p_db_conn_format, p_db_host, p_db_port, p_db_name, p_db_user, p_db_pass);
BEGIN
    p_db_exec_query  := 'SELECT name FROM pg_available_extensions WHERE name = ''dblink''';
    SELECT INTO p_db_extn_name name FROM dblink(p_db_conn_string, p_db_exec_query) AS extensions(name varchar);
    RAISE NOTICE 'Extension Name: %', p_db_extn_name;
END
$BODY$;

-- using pgsql query
WITH user_info AS (
    SELECT
        'hostaddr=%s port=%s dbname=%s user=%s password=%s' "db_conn_format",
        host(inet_server_addr()) "db_host",
        inet_server_port()       "db_port",
        CURRENT_USER             "db_user",
        'p@$$w0rd'               "db_pass",
        'postgres'               "db_name"
)
, conn_info AS (
    SELECT
        db_conn_format,
        format(db_conn_format, db_host, db_port, db_name, db_user, db_pass) "db_conn_string",
        'SELECT name FROM pg_available_extensions WHERE name = ''dblink'''  "db_exec_query"
    FROM user_info
)
SELECT * FROM dblink(
    (SELECT db_conn_string FROM conn_info),
    (SELECT db_exec_query  FROM conn_info)
) AS extensions(name varchar)
DO
$BODY$
DECLARE
    p_connstr text := 'hostaddr=127.0.0.1 port=5432 dbname=academia';
BEGIN
    PERFORM dblink_exec(p_connstr, format('UPDATE commission SET type = ''%s'' WHERE type = ''%s''', 'new_type', 'old_type'));
END
$BODY$;
DO
$BODY$
DECLARE
    p_schema varchar := 'academia';
    p_passwd varchar := 'p@$$w0rd';
    p_dbname varchar := 'academia_audit_staging';
    p_dblink varchar := 'hostaddr=127.0.0.1 port=5432 dbname=postgres';
BEGIN
    SELECT INTO p_dblink
        'hostaddr='|| host(inet_server_addr()) ||
        ' port='   || inet_server_port()       || ' dbname=' || current_database();
    
    RAISE NOTICE '%', p_dblink;
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_database WHERE datname = p_dbname) THEN
    --  PERFORM dblink_exec(p_dblink, format('CREATE DATABASE %s', p_dbname));
        RAISE NOTICE 'CREATE DATABASE %', p_dbname;
    END IF;

    IF NOT EXISTS (SELECT FROM pg_catalog.pg_user WHERE usename = p_schema) THEN
    --  PERFORM format('CREATE USER %s WITH ENCRYPTED PASSWORD ''%s''', p_schema, p_passwd);
        RAISE NOTICE 'CREATE USER % WITH ENCRYPTED PASSWORD ''%''', p_schema, p_passwd;
    END IF;

--  PERFORM format('GRANT ALL PRIVILEGES ON DATABASE %s TO %s', p_dbname, p_schema);
    RAISE NOTICE 'GRANT ALL PRIVILEGES ON DATABASE % TO %', p_dbname, p_schema;
END
$BODY$;

Recursive Query

-- Walk an org_unit hierarchy upwards: seed with the unit whose code matches
-- the slugified name 'Chorke Agency', then repeatedly fetch each unit's
-- parent until the root is reached.
WITH RECURSIVE hierarchy AS (
    SELECT
        o.id,
        o.code,   o.name,
        o.parent "parent_code",
        p.name   "parent_name"
    FROM       org_unit  o
    LEFT  JOIN org_unit  p ON p.code        = o.parent
    -- seed: slugify the display name (lower-case, spaces to dashes)
    WHERE o.code = LOWER(REPLACE('Chorke Agency', ' ', '-'))
    UNION
    SELECT
        o.id,
        o.code,   o.name,
        o.parent "parent_code",
        p.name   "parent_name"
    FROM       org_unit  o 
    LEFT  JOIN org_unit  p ON p.code        = o.parent
    -- recurse: each found row's parent_code selects its parent unit;
    -- UNION (not UNION ALL) de-duplicates and so also guards against cycles
    INNER JOIN hierarchy h ON h.parent_code = o.code
)
SELECT * FROM hierarchy;

Encode/Decode

SELECT decode('Chorke Academia, Inc.', 'escape');            -- text to binary
SELECT encode('Chorke Academia, Inc.'::bytea, 'escape');     -- binary to text

SELECT convert_to('Chorke Academia, Inc.', 'UTF8');          -- text to binary
SELECT convert_from('Chorke Academia, Inc.'::bytea, 'UTF8'); -- binary to text

SELECT encode('Chorke Academia, Inc.'::bytea, 'base64');                       -- text to base64
SELECT convert_from(decode('Q2hvcmtlIEFjYWRlbWlhLCBJbmMu', 'base64'), 'UTF8'); -- base64 to text

Base36 Encode

-- Encode a base-10 integer as a Base36 string using digits 0-9 and A-Z.
-- NOTE(review): a negative input is encoded as its absolute value — the
-- sign is silently dropped; confirm callers never pass negatives.
CREATE OR REPLACE FUNCTION fn_base36_encode(IN base10 bigint)
    RETURNS  varchar
    LANGUAGE plpgsql IMMUTABLE
AS
$BODY$
DECLARE
    base36 varchar := '';           -- accumulated digits, most significant first
    intval bigint  := abs(base10);  -- working value; abs() keeps the loop terminating
    char0z char[]  := regexp_split_to_array('0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ', '');
BEGIN
    -- Peel off the least significant base36 digit each pass and prepend it
    -- (array indexing is 1-based, hence the +1).
    WHILE intval != 0 LOOP
        base36 := char0z[(intval % 36)+1] || base36;
        intval := intval / 36;
    END LOOP;

    -- Zero never enters the loop, so map it to '0' explicitly.
    IF base10 = 0 THEN base36 := '0'; END IF;
    RETURN base36;
END
$BODY$;

Base36 Decode

-- Decode a Base36 string (digits 0-9, A-Z; case-insensitive) to bigint.
-- Declared IMMUTABLE for consistency with its sibling fn_base36_encode.
--
-- Parameters:
--   base36 — Base36 text to decode.
-- Returns:
--   the base-10 value as bigint.
-- Raises:
--   an exception on any character outside [0-9A-Za-z].
CREATE OR REPLACE FUNCTION fn_base36_decode(IN base36 varchar)
    RETURNS  bigint
    LANGUAGE plpgsql IMMUTABLE
AS
$BODY$
DECLARE
    symbol char;
    intval bigint;
    weight bigint  := 1;  -- 36^position, kept in bigint so arithmetic stays exact
    base10 bigint  := 0;
    char0z varchar := '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ';
BEGIN
    -- Walk the string from the least significant (rightmost) character to
    -- the most significant, accumulating digit * 36^position.
    -- The previous version used `36 ^ rindex`, a double-precision power
    -- that loses exactness above 2^53; bigint multiplication does not.
    FOR rindex IN REVERSE char_length(base36)..1 LOOP
        symbol := substring(upper(base36) FROM rindex FOR 1);
        intval := position(symbol IN char0z) - 1;
        -- position() returns 0 for a miss; the old code silently folded
        -- that in as -1 — fail loudly instead.
        IF intval < 0 THEN
            RAISE EXCEPTION 'fn_base36_decode: invalid base36 digit "%"', symbol;
        END IF;
        base10 := base10 + intval * weight;
        weight := weight * 36;
    END LOOP;

    RETURN base10;
END
$BODY$;

Large Object

\dfS lo_*
-- lo_open(lobjId oid, mode integer) returns integer
-- The mode parameter to lo_open uses two constants:
--   INV_READ       = 0x20000
--   INV_WRITE      = 0x40000
--   INV_READ_WRITE = 0x60000

SELECT  x'20000'::int;          -- SELECT cast(x'20000' AS integer);          -- 131072
SELECT  x'40000'::int;          -- SELECT cast(x'40000' AS integer);          -- 262144
SELECT  x'60000'::int;          -- SELECT cast(x'60000' AS integer);          -- 393216
SELECT (x'20000'|x'40000')::int -- SELECT cast(x'20000'|x'40000' AS integer); -- 393216

-- example: a
SELECT lo_get(photo::oid), lo_get(resume::oid) FROM employee;                        -- select large object as binary data
SELECT lo_from_bytea(0, 'Chorke Academia, Inc.'::bytea) FROM employee;               -- create large object from ascii text
SELECT lo_from_bytea(0, convert_to('Chorke Academia, Inc.', 'UTF8')) FROM employee;  -- create large object from utf-8 text
-- example: b
SELECT resume FROM profile;                                   -- oid
SELECT lo_get(resume::oid) FROM profile;                      -- binary
SELECT encode(lo_get(resume::oid), 'escape') FROM profile;    -- binary to text
SELECT encode(lo_get(resume::oid), 'base64') FROM profile;    -- binary to base64
-- example: c
SELECT convert_from(lo_get(resume::oid), 'UTF8') FROM profile;
SELECT convert_from(loread(lo_open(resume::oid, x'40000'::int), x'40000'::int), 'UTF8') FROM profile;

SELECT lowrite(lo_open(lo_creat(-1), x'60000'::int), convert_to('Chorke Academia, Inc.', 'UTF8')) FROM profile; -- create large object
SELECT lowrite(lo_open(resume::oid,  x'60000'::int), convert_to('Chorke Academia, Inc.', 'UTF8')) FROM profile; -- update large object

Insert

-- right way to insert in insert
INSERT INTO employee(id, resume, name) VALUES(
    (SELECT COALESCE(max(id), 0) + 1 FROM employee),
    lo_from_bytea(0, convert_to('Chorke Academia, Inc.', 'UTF8')),
    'Chorke Academia, Inc.'
);

Select

-- right way to select in select
SELECT
    id,
    name,
    resume AS resume_oid,
    lo_get(resume::oid) AS resume_binary,
    convert_from(lo_get(resume::oid), 'UTF8') AS resume_text
FROM employee;

Update

-- right way to create in update
-- wrong way to update in update
-- SELECT oid FROM pg_largeobject_metadata;
UPDATE employee SET
    resume = lo_from_bytea(0, convert_to('Chorke Academia, Inc.', 'UTF8'))
WHERE id = 1;

-- SELECT oid FROM pg_largeobject_metadata;
-- there should be exactly 1 oid per record, but this creates N+1 oids
-- after N updates. Those N orphaned oids must then be deleted from the
-- database, otherwise they waste a huge amount of disk space if the
-- objects are never unlinked! cleanup has to be done manually, which
-- can be a disaster
-- SELECT lo_unlink(orphan_lo::oid);
-- right way to update in update
-- SELECT oid FROM pg_largeobject_metadata;
-- Update an employee's large-object resume without orphaning oids:
-- overwrite the existing large object in place when one exists, and only
-- create a new one (storing its oid on the row) when there is none.
DO
$BODY$
DECLARE
    p_id     integer:= 1;                        -- employee row to update
    p_oid    integer:= 0;                        -- existing resume oid, 0 when absent
    p_resume varchar:= 'Chorke Academia, Inc.';  -- new resume content
BEGIN
    -- Resolve the row's resume oid via pg_largeobject_metadata (0 when the
    -- row has no resume or the object no longer exists).
    SELECT INTO p_oid COALESCE(m.oid, 0) FROM employee e
    LEFT JOIN pg_largeobject_metadata m ON e.resume::oid = m.oid WHERE e.id = p_id;

    IF p_oid > 0 THEN
        -- Overwrite the existing object; x'60000' = INV_READ|INV_WRITE.
        -- NOTE(review): lowrite() writes from offset 0 without truncating —
        -- a shorter new resume would leave stale trailing bytes; confirm.
        PERFORM lowrite(lo_open(p_oid::oid, x'60000'::int), convert_to(p_resume, 'UTF8'));
    ELSE
        -- No existing object: create one and point the row at it.
        UPDATE employee SET
            resume = lo_from_bytea(0, convert_to(p_resume, 'UTF8'))
        WHERE id = p_id;
    END IF;
END
$BODY$;

-- SELECT oid FROM pg_largeobject_metadata;
-- SELECT * FROM employee

Delete

-- wrong way to unlink in delete
-- SELECT oid FROM pg_largeobject_metadata;
DELETE FROM employee WHERE id = 1;

-- SELECT oid FROM pg_largeobject_metadata;
-- the respective oids of deleted records must be unlinked as well,
-- otherwise they become the root cause of wasted disk space if those
-- objects are never removed! cleanup then has to be done manually,
-- which can be a disaster
-- SELECT lo_unlink(orphan_lo::oid);
-- right way to unlink in delete
-- SELECT oid FROM pg_largeobject_metadata;
-- Delete an employee row AND unlink its large-object resume, so no orphan
-- oid is left behind in pg_largeobject after the row is gone.
-- (Removed the unused local p_resume from the original block.)
DO
$BODY$
DECLARE
    p_id  integer:= 1;  -- employee row to delete
    p_oid integer:= 0;  -- resume large-object oid, 0 when absent
BEGIN
    -- Capture the resume oid BEFORE deleting the row — afterwards the
    -- employee record no longer exists to look it up from.
    SELECT INTO p_oid COALESCE(m.oid, 0) FROM employee e
    LEFT JOIN pg_largeobject_metadata m ON e.resume::oid = m.oid WHERE e.id = p_id;
    DELETE FROM employee WHERE id = p_id;

    IF p_oid > 0 THEN
        -- Release the large object itself; DELETE alone only drops the oid reference.
        PERFORM lo_unlink(p_oid::oid);
    END IF;
END
$BODY$;

-- SELECT oid FROM pg_largeobject_metadata;
-- SELECT * FROM employee;

Alter

ALTER TABLE employee
--  ALTER COLUMN photo TYPE bytea USING lo_get(photo::oid);
    ALTER COLUMN photo TYPE text  USING encode(lo_get(photo::oid), 'base64');

Bastion Dump

# Build a pg_dump command that tunnels through two bastion hosts
# (gateway -> app) and runs against the RDS endpoint. The $STDOUT$
# marker lines fence off ssh banner/echo noise so it can be stripped
# from the captured dump afterwards.
# NOTE(review): the heredoc body below is printf format-string DATA —
# the two %s slots are password and database name; do not edit casually.
BASH_REMOTE_SCRIPT_FRMT=$(cat <<'EOF'
# suppress stdout
echo '/* $STDOUT$'
echo "${HOSTNAME}${HOME}"

ssh -qt gtw.vpc.chorke.org <<'EOF_00'
echo "${HOSTNAME}${HOME}"
ssh -qt app.vpc.chorke.org <<'EOF_01'
echo "${HOSTNAME}${HOME}"
echo '$STDOUT$ */'

pg_dump postgres://academia:%s@rds.vpc.chorke.org:5432/%s
# suppress stdout
echo '/* $STDOUT$'
EOF_01
EOF_00
echo '$STDOUT$ */'
EOF
)

# NOTE(review): inline credentials are fine for a wiki example; use a
# secrets store or ~/.pgpass in real automation.
BASH_REMOTE_SCRIPT_PASS='sadaqah!'
BASH_REMOTE_SCRIPT_PGDB='academia_keycloak_staging'
# fill the %s slots (password, dbname), derive the dump file name, run
printf -v  BASH_REMOTE_SCRIPT_EXEC          "${BASH_REMOTE_SCRIPT_FRMT}" "${BASH_REMOTE_SCRIPT_PASS}" "${BASH_REMOTE_SCRIPT_PGDB}"
printf -v  BASH_EXPORT_SCRIPT_DUMP '%s.sql' "${BASH_REMOTE_SCRIPT_PGDB}"
bash -c "${BASH_REMOTE_SCRIPT_EXEC}" >       ${BASH_EXPORT_SCRIPT_DUMP}

# strip everything between the $STDOUT$ fences, then the fence lines
# themselves, and compress the cleaned dump
sed '/^\/\* \$STDOUT\$$/,/^\$STDOUT\$ \*\/$/{/^\/\* \$STDOUT\$$/!{/^\$STDOUT\$ \*\/$/!d}}' -i ${BASH_EXPORT_SCRIPT_DUMP}
sed -z 's|\n/\* $STDOUT$\n$STDOUT$ \*/||g' -i ${BASH_EXPORT_SCRIPT_DUMP}
sed -z 's|/\* $STDOUT$\n$STDOUT$ \*/\n||g' -i ${BASH_EXPORT_SCRIPT_DUMP}
gzip ${BASH_EXPORT_SCRIPT_DUMP}

Export CSV

psql -U academia academia_audit_staging
-- Export one day of audit rows to CSV. A half-open range on logged_on
-- is index-friendly (sargable), unlike the original
-- DATE_TRUNC('day', logged_on) = '20241010' which wraps the column in a
-- function; both select exactly the rows logged on 2024-10-10.
\COPY (SELECT * FROM audit_log WHERE action_type = 'Sync Collection' AND logged_on >= '2024-10-10' AND logged_on < '2024-10-11')\
 To '~/.config/audit_log_20241010_1010_MYT.csv' With CSV DELIMITER ',' HEADER;

Knowledge

wsl (windows subsystem for linux)
# restart services via the SysV wrapper (as used inside WSL here)
sudo service redis-server restart
sudo service postgresql restart
sudo service apache2 restart
comment and uncomment sql
# prefix every migration line with "-- " (comment out all SQL)
sed -i -e 's|^|-- |g' src/main/resources/db/migration/*.sql
# strip the "-- " prefix again (uncomment)
sed -i -e 's|^-- ||g' src/main/resources/db/migration/*.sql
-- psql
-- create an application database and a same-named owner role
CREATE DATABASE chorke_init_pro;
CREATE USER chorke_init_pro WITH ENCRYPTED PASSWORD 'pa55w0rd';
GRANT ALL PRIVILEGES ON DATABASE chorke_init_pro TO chorke_init_pro;
-- \q
-- revoke public connect from specific database after creation
REVOKE CONNECT ON DATABASE academia_ebis_dev FROM PUBLIC;
-- revoke public connect for all future databases
-- (new databases are cloned from template1, so they inherit the revoke)
REVOKE CONNECT ON DATABASE template1 FROM PUBLIC;
-- Find unique-key constraints (named with a literal uk_ prefix) on
-- public.fiscal_year via the system catalogs.
SELECT
    con.conname,
    nsp.nspname,
    rel.relname
FROM pg_catalog.pg_constraint con
    INNER JOIN pg_catalog.pg_class rel     ON rel.oid = con.conrelid
    -- connamespace qualified for consistency with the other join columns
    INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = con.connamespace
WHERE
    nsp.nspname = 'public'      AND
    rel.relname = 'fiscal_year' AND
    -- escape the underscore: in LIKE a bare _ matches ANY single
    -- character, so 'uk_%' would also match names like 'ukx...'
    con.conname LIKE 'uk\_%';
-- List sequences visible in the search path (the query psql runs for
-- \ds): relkind IN ('S','') keeps sequences only, while the CASE maps
-- each pg_class.relkind code to a human-readable type label.
SELECT
    n.nspname AS "schema",
    c.relname AS "name",
    CASE c.relkind
        WHEN 'v' THEN 'view'
        WHEN 'i' THEN 'index'
        WHEN 'r' THEN 'table'
        WHEN 's' THEN 'special'
        WHEN 'S' THEN 'sequence'
        WHEN 'f' THEN 'foreign table'
        WHEN 'p' THEN 'partitioned table'
        WHEN 'I' THEN 'partitioned index'
        WHEN 'm' THEN 'materialized view'
    END AS "type",
    pg_catalog.pg_get_userbyid(c.relowner) AS "owner"
FROM pg_catalog.pg_class c
    LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
-- exclude TOAST and system schemas; keep only objects visible without
-- schema qualification in the current search path
WHERE c.relkind IN ('S','')
    AND n.nspname !~ '^pg_toast'
    AND n.nspname <> 'pg_catalog'
    AND n.nspname <> 'information_schema'
    AND pg_catalog.pg_table_is_visible(c.oid)
ORDER BY 1,2;
-- Per-database report (what psql shows for \l+): owner, encoding,
-- collation, access privileges, on-disk size, tablespace, description.
-- Size is computed only when the current role may CONNECT to the
-- database; otherwise 'No Access' is shown instead of failing.
SELECT d.datname                                       AS "db_name",
    pg_catalog.pg_get_userbyid(d.datdba)               AS "db_owner",
    pg_catalog.pg_encoding_to_char(d.encoding)         AS "db_encoding",
    d.datcollate                                       AS "db_collation",
    d.datctype                                         AS "db_collation_type",
    pg_catalog.array_to_string(d.datacl, e'\n')        AS "db_access_privileges",
    CASE
        WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN
            pg_catalog.pg_size_pretty(pg_catalog.pg_database_size(d.datname))
        ELSE 'No Access'
    END                                                AS "db_size",
    t.spcname                                          AS "db_tablespace",
    pg_catalog.shobj_description(d.oid, 'pg_database') AS "db_description"
FROM pg_catalog.pg_database   d
JOIN pg_catalog.pg_tablespace t ON d.dattablespace = t.oid
ORDER BY 1;
-- List database roles from pg_user: id, name, (masked) password and
-- the superuser flag, ordered by user id.
SELECT u.usesysid AS "user_id",
       u.usename  AS "user_name",
       u.passwd   AS "user_password",
       u.usesuper AS "is_super_user"
FROM pg_catalog.pg_user AS u
ORDER BY u.usesysid;
-- Demonstrates two ways to build a libpq connection string in plpgsql:
-- first from a hard-coded template, then derived from the live session
-- via inet_server_addr()/inet_server_port()/current_database().
DO
$BODY$
DECLARE
    -- demo password only; never hard-code real credentials
    p_password varchar :='p@$$w0rd';
    p_connection_string varchar :='hostaddr=127.0.0.1 port=5432 dbname=postgres user=postgres password=';
BEGIN
    -- variant 1: static template + password
    p_connection_string := p_connection_string || p_password;
    RAISE NOTICE 'Connection String: %', p_connection_string;
    -- variant 2: rebuild from the current session's address, port,
    -- database and user
    SELECT INTO p_connection_string
        'hostaddr=' || host(inet_server_addr()) || ' port=' || inet_server_port() ||
        ' dbname='  || current_database()       || ' user=' || CURRENT_USER       || ' password=' || p_password;
    RAISE NOTICE 'Connection String: %', p_connection_string;
END
$BODY$;
# (Re)create the staging database and its owner role in one
# non-interactive psql session.
# NOTE(review): this DROPs any existing academia_ebis_staging.
cat << EOF | psql -U postgres
DROP DATABASE IF EXISTS academia_ebis_staging;
CREATE DATABASE academia_ebis_staging;

CREATE USER academia WITH ENCRYPTED PASSWORD 'sadaqah!';
GRANT ALL PRIVILEGES ON DATABASE academia_ebis_staging TO academia;
ALTER DATABASE academia_ebis_staging OWNER TO academia;
-- ALTER USER academia WITH NOSUPERUSER;
-- ALTER USER academia WITH SUPERUSER;
EOF

Allow Remote

Docker » Compose » Manage

# network debugging tools, then open the Postgres port in ufw
apt install -y iputils-ping telnet dnsutils
sudo systemctl status ufw
sudo ufw status verbose
sudo ufw status numbered
sudo ufw allow 5432/tcp
sudo ufw enable
# edit the config pair matching the installed major version (14 or 16)
nano /etc/postgresql/14/main/postgresql.conf
nano /etc/postgresql/16/main/postgresql.conf
nano /etc/postgresql/14/main/pg_hba.conf
nano /etc/postgresql/16/main/pg_hba.conf
# postgresql.conf: listen on all interfaces (required for remote access)
listen_addresses = '*'                  # what IP address(es) to listen on;
                                        # comma-separated list of addresses;
                                        # defaults to 'localhost'; use '*' for all
                                        # (change requires restart)
port = 5432                             # (change requires restart)
# pg_hba.conf entries follow
# IPv4 local connections:
host    all             all             127.0.0.1/32            scram-sha-256
# NOTE(review): 0.0.0.0/0 accepts connections from ANY IPv4 host —
# outside a lab, restrict to known subnets like the examples below
host    all             all             0.0.0.0/0               scram-sha-256
# host  all             all             10.19.83.1/24           scram-sha-256
# host  all             all             10.20.13.1/24           scram-sha-256

Spread Sheet

Spread Sheet

=CONCAT("(LOCALTIMESTAMP, 'religion', 'chorke.org', (SELECT (COALESCE(MAX(id), 0) + 1) FROM grouped_collection), '",  SUBSTITUTE(A2 , """" ,  ""), "', '", SUBSTITUTE(A2 , """" ,  ""),  "', '{""state"": """ , SUBSTITUTE(B2 , "'" , "''")  , """, ""city"": """ , SUBSTITUTE(C2 , "'" , "''") , """}'),")

=CONCAT("[""",  SUBSTITUTE(A2 , """",  ""), """, """, SUBSTITUTE(A2, """",  ""), "",""", {""state"": """ , SUBSTITUTE(B2 , "'" , "''")  , """, ""city"": """ , SUBSTITUTE(C2 , "'" , "''") , """}", "],")

=CONCAT("[""",  SUBSTITUTE(C518 , """",  ""), """, """, SUBSTITUTE(B518, """",  ""), "",""", {}, " , A518 , ", ",  IF(ISBLANK(D518), "false", "true"), "],")

=CONCAT("[""",  SUBSTITUTE(C5 , """",  ""), """, """, SUBSTITUTE(B5, """",  ""), "",""", {""class"": """, D5 ,""", ""income"": true}, " , A5, "],")

=CONCAT("[""",  SUBSTITUTE(C27 , """",  ""), """, """, SUBSTITUTE(B27, """",  ""), "",""", {}, " , A27, "],")

=CONCAT("[""",  C27, """, """, B27, "",""", {}, " , A27, "],")

Docker PSQL

Docker PSQL

# run Postgres 12 in Docker, seeded from a local init.sql on first boot
# (docker-entrypoint-initdb.d); published only on loopback port 5433 to
# avoid clashing with a host-installed Postgres on 5432
docker run -d --name postgres\
 -e POSTGRES_PASSWORD=sadaqah\
 -v ./init.sql:/docker-entrypoint-initdb.d/init.sql\
 -p 127.0.0.1:5433:5432\
 postgres:12.14
# open a shell inside the container, then psql
docker exec -it postgres bash
psql -U postgres
 
# stop and remove the container
docker stop postgres
docker rm postgres
# or run psql directly without an interactive shell
docker exec -it postgres\
 psql -U postgres
 
docker ps -a
# force-remove: stops and deletes in one step
docker rm postgres -f

References

References